treewide: Drop is_ssa asserts

NIR is now always in SSA form, so these asserts are trivially true and can be dropped.

Via Coccinelle patch:

    @@
    expression x;
    @@

    -assert(x.is_ssa);

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24432>
This commit was authored by Alyssa Rosenzweig on 2023-08-01 11:17:24 -04:00 and committed by Marge Bot.
parent d559764e7c
commit 5fead24365
118 changed files with 0 additions and 348 deletions

View file

@ -52,7 +52,6 @@ static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_ssa_def *d
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
assert(src.is_ssa);
return nir->ssa_defs[src.ssa->index];
}
@ -1477,8 +1476,6 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
assert(instr->dest.is_ssa);
/* Buffers don't support A16. */
if (args->a16)
args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");

View file

@ -182,7 +182,6 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_after_instr(&instr->instr);
assert(instr->dest.is_ssa);
nir_ssa_def *result = &instr->dest.ssa;
if (util_format_is_pure_uint(format)) {
result = nir_format_unpack_uint(b, result, bits16, 4);

View file

@ -75,8 +75,6 @@ lower_line_smooth_func(struct lower_line_smooth_state *state,
intr->num_components != 4)
continue;
assert(intr->src[0].is_ssa);
lower_line_smooth_intrinsic(state, &b, intr);
progress = true;
}

View file

@ -122,7 +122,6 @@ lower_deref_instr(nir_builder *b, nir_intrinsic_instr *instr,
for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
d = nir_deref_instr_parent(d)) {
assert(d->deref_type == nir_deref_type_array);
assert(d->arr.index.is_ssa);
unsigned array_stride = ATOMIC_COUNTER_SIZE;
if (glsl_type_is_array(d->type))

View file

@ -194,7 +194,6 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
break;
/* We use nir_address_format_32bit_index_offset */
assert(deref->dest.is_ssa);
assert(deref->dest.ssa.bit_size == 32);
deref->dest.ssa.num_components = 2;
@ -261,7 +260,6 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
* from the SSBO.
*/
if (glsl_type_is_boolean(deref->type)) {
assert(intrin->dest.is_ssa);
b.cursor = nir_after_instr(&intrin->instr);
intrin->dest.ssa.bit_size = 32;
nir_ssa_def *bval = nir_i2b(&b, &intrin->dest.ssa);
@ -289,7 +287,6 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
* step but in practice it doesn't cost much.
*/
if (glsl_type_is_boolean(deref->type)) {
assert(intrin->src[1].is_ssa);
b.cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *ival = nir_b2i32(&b, intrin->src[1].ssa);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],

View file

@ -281,8 +281,6 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
b->cursor = nir_before_instr(&instr->instr);
if (texture_idx >= 0) {
assert(instr->src[texture_idx].src.is_ssa);
nir_deref_instr *texture_deref =
lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
/* only lower non-bindless: */
@ -294,7 +292,6 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
}
if (sampler_idx >= 0) {
assert(instr->src[sampler_idx].src.is_ssa);
nir_deref_instr *sampler_deref =
lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
/* only lower non-bindless: */

View file

@ -587,7 +587,6 @@ fixup_phi_srcs(clone_state *state)
/* Remove from this list */
list_del(&src->src.use_link);
assert(src->src.is_ssa);
src->src.ssa = remap_local(state, src->src.ssa);
list_addtail(&src->src.use_link, &src->src.ssa->uses);

View file

@ -441,11 +441,9 @@ static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
nir_foreach_phi(phi, block) {
assert(phi->dest.is_ssa);
merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
if (nir_src_is_undef(src->src))
continue;
@ -464,9 +462,7 @@ aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
{
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->src_is_reg);
assert(entry->src.is_ssa);
assert(!entry->dest_is_reg);
assert(entry->dest.dest.is_ssa);
assert(entry->dest.dest.ssa.num_components ==
entry->src.ssa->num_components);
@ -625,7 +621,6 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
#ifndef NDEBUG
nir_phi_instr *phi = nir_instr_as_phi(instr);
assert(phi->dest.is_ssa);
struct hash_entry *entry =
_mesa_hash_table_search(state->merge_node_table, &phi->dest.ssa);
assert(entry != NULL);

View file

@ -767,21 +767,16 @@ nir_instr_get_dest_ssa_def(nir_instr *instr)
{
switch (instr->type) {
case nir_instr_type_alu:
assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
return &nir_instr_as_alu(instr)->dest.dest.ssa;
case nir_instr_type_deref:
assert(nir_instr_as_deref(instr)->dest.is_ssa);
return &nir_instr_as_deref(instr)->dest.ssa;
case nir_instr_type_load_const:
return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
assert(nir_instr_as_phi(instr)->dest.is_ssa);
return &nir_instr_as_phi(instr)->dest.ssa;
case nir_instr_type_intrinsic:
assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
return &nir_instr_as_intrinsic(instr)->dest.ssa;
case nir_instr_type_tex:
assert(nir_instr_as_tex(instr)->dest.is_ssa);
return &nir_instr_as_tex(instr)->dest.ssa;
default:
unreachable("We never ask for any of these");

View file

@ -12,7 +12,6 @@ bool
nir_legacy_float_mod_folds(nir_alu_instr *mod)
{
assert(mod->op == nir_op_fabs || mod->op == nir_op_fneg);
assert(mod->dest.dest.is_ssa);
/* No legacy user supports fp64 modifiers */
if (mod->dest.dest.ssa.bit_size == 64)
@ -223,7 +222,6 @@ chase_fsat(nir_ssa_def **def)
/* Otherwise, we're good */
nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
assert(alu->dest.dest.is_ssa);
*def = &alu->dest.dest.ssa;
return true;
}

View file

@ -109,7 +109,6 @@ propagate_across_edge(nir_block *pred, nir_block *succ,
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
nir_foreach_phi(phi, succ) {
assert(phi->dest.is_ssa);
set_ssa_def_dead(&phi->dest.ssa, live);
}

View file

@ -336,7 +336,6 @@ phi_instr_as_alu(nir_phi_instr *phi)
{
nir_alu_instr *first = NULL;
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
return NULL;
@ -356,7 +355,6 @@ static bool
alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
{
assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
assert(alu->dest.dest.is_ssa);
for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
if (alu->src[src_idx].swizzle[i] != i)
return false;
@ -599,7 +597,6 @@ find_array_access_via_induction(loop_info_state *state,
if (d->deref_type != nir_deref_type_array)
continue;
assert(d->arr.index.is_ssa);
nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);
if (array_index->type != basic_induction)
@ -1237,7 +1234,6 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
list_for_each_entry(nir_loop_terminator, terminator,
&state->loop->info->loop_terminator_list,
loop_terminator_link) {
assert(terminator->nif->condition.is_ssa);
nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };
if (!nir_ssa_scalar_is_alu(cond)) {

View file

@ -49,8 +49,6 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
nir_ssa_def *lowered = NULL;
assert(instr->dest.dest.is_ssa);
b->cursor = nir_before_instr(&instr->instr);
b->exact = instr->exact;

View file

@ -51,8 +51,6 @@ inst_is_vector_alu(const nir_instr *instr, const void *_state)
/* There is no ALU instruction which has a scalar destination, scalar
* src[0], and some other vector source.
*/
assert(alu->dest.dest.is_ssa);
assert(alu->src[0].src.is_ssa);
return alu->dest.dest.ssa.num_components > 1 ||
nir_op_infos[alu->op].input_sizes[0] > 1;
}
@ -189,7 +187,6 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
unsigned num_src = nir_op_infos[alu->op].num_inputs;
unsigned i, chan;
assert(alu->dest.dest.is_ssa);
assert(alu->dest.write_mask != 0);
b->exact = alu->exact;

View file

@ -106,7 +106,6 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
b.cursor = nir_after_instr(&intrin->instr);
if (intrin->intrinsic == nir_intrinsic_store_deref) {
assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
if (nir_src_is_const(deref->arr.index)) {

View file

@ -219,13 +219,11 @@ static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
nir_phi_instr *last_phi)
{
assert(phi->dest.is_ssa);
unsigned old_bit_size = phi->dest.ssa.bit_size;
assert(old_bit_size < bit_size);
nir_foreach_phi_src(src, phi) {
b->cursor = nir_after_block_before_jump(src->pred);
assert(src->src.is_ssa);
nir_ssa_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);
nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
@ -347,7 +345,6 @@ lower_64bit_phi_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
return false;
nir_phi_instr *phi = nir_instr_as_phi(instr);
assert(phi->dest.is_ssa);
if (phi->dest.ssa.bit_size <= 32)
return false;

View file

@ -541,7 +541,6 @@ nir_lower_blend_instr(nir_builder *b, nir_instr *instr, void *data)
/* Grab the input color. We always want 4 channels during blend. Dead
* code will clean up any channels we don't need.
*/
assert(store->src[0].is_ssa);
nir_ssa_def *src = nir_pad_vector(b, store->src[0].ssa, 4);
assert(nir_src_as_uint(store->src[1]) == 0 && "store_output invariant");

View file

@ -364,7 +364,6 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
if (dst_bit_size == 0) {
dst_bit_size = src_bit_size;
} else if (src_bit_size != dst_bit_size) {
assert(phi_src->src.is_ssa);
b->cursor = nir_before_src(&phi_src->src);
nir_op convert_op = get_bool_convert_opcode(dst_bit_size);
nir_ssa_def *new_src =

View file

@ -47,8 +47,6 @@ lower_alu_instr(nir_alu_instr *alu)
{
const nir_op_info *op_info = &nir_op_infos[alu->op];
assert(alu->dest.dest.is_ssa);
switch (alu->op) {
case nir_op_mov:
case nir_op_vec2:

View file

@ -215,7 +215,6 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
NIR_SRC_INIT);
continue;
} else {
assert(tex->src[i].src.is_ssa);
b.cursor = nir_before_instr(&tex->instr);
/* Back-ends expect a 32-bit thing, not 64-bit */
nir_ssa_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
@ -254,7 +253,6 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
if (!lower_image_derefs)
break;
assert(intrin->src[0].is_ssa);
b.cursor = nir_before_instr(&intrin->instr);
/* Back-ends expect a 32-bit thing, not 64-bit */
nir_ssa_def *offset = nir_u2u32(&b, intrin->src[0].ssa);

View file

@ -151,7 +151,6 @@ find_output_in_block(nir_block *block, unsigned drvloc)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if ((intr->intrinsic == nir_intrinsic_store_output) &&
nir_intrinsic_base(intr) == drvloc) {
assert(intr->src[0].is_ssa);
assert(nir_src_is_const(intr->src[1]));
return intr->src[0].ssa;
}

View file

@ -461,9 +461,6 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
if (!(options & nir_lower_fp64_full_software))
return NULL;
assert(instr->dest.dest.is_ssa);
const char *name;
const char *mangled_name;
const struct glsl_type *return_type = glsl_uint64_t_type();
@ -673,7 +670,6 @@ should_lower_double_instr(const nir_instr *instr, const void *_data)
const nir_alu_instr *alu = nir_instr_as_alu(instr);
assert(alu->dest.dest.is_ssa);
bool is_64 = alu->dest.dest.ssa.bit_size == 64;
unsigned num_srcs = nir_op_infos[alu->op].num_inputs;

View file

@ -85,8 +85,6 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
nir_tex_instr *tex;
nir_ssa_def *def;
assert(intr->dest.is_ssa);
b->cursor = nir_before_instr(&intr->instr);
texcoord = get_texcoord(b, state);

View file

@ -65,7 +65,6 @@ lower_fragcolor_instr(nir_builder *b, nir_instr *intr, void *data)
return false;
b->cursor = nir_after_instr(&instr->instr);
assert(instr->src[1].is_ssa);
nir_ssa_def *frag_color = instr->src[1].ssa;
ralloc_free(out->name);

View file

@ -176,7 +176,6 @@ set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
break;
}
else {
assert(condition.is_ssa);
nir_ssa_def *ssa_def = condition.ssa;
assert(ssa_def->bit_size == 1);
assert(ssa_def->num_components == 1);

View file

@ -83,7 +83,6 @@ lower(nir_builder *b, nir_instr *instr, void *data)
nir_pop_if(b, NULL);
if (has_dest) {
assert(intr->dest.is_ssa);
nir_ssa_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
/* We can't use nir_ssa_def_rewrite_uses_after on phis, so use the global

View file

@ -169,7 +169,6 @@ lower_indirect_derefs_block(nir_block *block, nir_builder *b,
assert(path.path[0] == base);
if (intrin->intrinsic == nir_intrinsic_store_deref) {
assert(intrin->src[1].is_ssa);
emit_load_store_deref(b, intrin, base, &path.path[1],
NULL, intrin->src[1].ssa);
} else {

View file

@ -1082,13 +1082,10 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
case nir_op_u2u8:
case nir_op_u2u16:
case nir_op_u2u32:
assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
case nir_op_bcsel:
assert(alu->src[1].src.is_ssa);
assert(alu->src[2].src.is_ssa);
assert(alu->src[1].src.ssa->bit_size ==
alu->src[2].src.ssa->bit_size);
if (alu->src[1].src.ssa->bit_size != 64)
@ -1100,8 +1097,6 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
case nir_op_ilt:
case nir_op_uge:
case nir_op_ige:
assert(alu->src[0].src.is_ssa);
assert(alu->src[1].src.is_ssa);
assert(alu->src[0].src.ssa->bit_size ==
alu->src[1].src.ssa->bit_size);
if (alu->src[0].src.ssa->bit_size != 64)
@ -1110,12 +1105,10 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
case nir_op_ufind_msb:
case nir_op_find_lsb:
case nir_op_bit_count:
assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
case nir_op_amul:
assert(alu->dest.dest.is_ssa);
if (options->has_imul24)
return false;
if (alu->dest.dest.ssa.bit_size != 64)
@ -1127,7 +1120,6 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
case nir_op_u2f32:
case nir_op_i2f16:
case nir_op_u2f16:
assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
@ -1135,7 +1127,6 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
case nir_op_f2i64:
FALLTHROUGH;
default:
assert(alu->dest.dest.is_ssa);
if (alu->dest.dest.ssa.bit_size != 64)
return false;
break;
@ -1239,7 +1230,6 @@ lower_scan_iadd64(nir_builder *b, const nir_intrinsic_instr *intrin)
* no larger than 256 which seems reasonable.) We can then scan on each of
* the chunks and add them back together at the end.
*/
assert(intrin->src[0].is_ssa);
nir_ssa_def *x = intrin->src[0].ssa;
nir_ssa_def *x_low =
nir_u2u32(b, nir_iand_imm(b, x, 0xffffff));
@ -1281,19 +1271,16 @@ should_lower_int64_intrinsic(const nir_intrinsic_instr *intrin,
case nir_intrinsic_quad_swap_horizontal:
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
assert(intrin->dest.is_ssa);
return intrin->dest.ssa.bit_size == 64 &&
(options->lower_int64_options & nir_lower_subgroup_shuffle64);
case nir_intrinsic_vote_ieq:
assert(intrin->src[0].is_ssa);
return intrin->src[0].ssa->bit_size == 64 &&
(options->lower_int64_options & nir_lower_vote_ieq64);
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan:
assert(intrin->dest.is_ssa);
if (intrin->dest.ssa.bit_size != 64)
return false;
@ -1331,7 +1318,6 @@ lower_int64_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
return split_64bit_subgroup_op(b, intrin);
case nir_intrinsic_vote_ieq:
assert(intrin->src[0].is_ssa);
return lower_vote_ieq(b, intrin->src[0].ssa);
case nir_intrinsic_reduce:

View file

@ -48,10 +48,6 @@ nir_lower_interpolation_instr(nir_builder *b, nir_instr *instr, void *cb_data)
if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
return false;
assert(intr->dest.is_ssa);
assert(intr->src[0].is_ssa);
assert(intr->src[1].is_ssa);
nir_intrinsic_instr *bary_intrinsic =
nir_instr_as_intrinsic(intr->src[0].ssa->parent_instr);

View file

@ -345,7 +345,6 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
unsigned component, const struct glsl_type *type)
{
assert(intrin->dest.is_ssa);
if (intrin->dest.ssa.bit_size == 64 &&
(state->options & nir_lower_io_lower_64bit_to_32)) {
nir_builder *b = &state->builder;
@ -457,7 +456,6 @@ lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
unsigned component, const struct glsl_type *type)
{
assert(intrin->src[1].is_ssa);
if (intrin->src[1].ssa->bit_size == 64 &&
(state->options & nir_lower_io_lower_64bit_to_32)) {
nir_builder *b = &state->builder;
@ -572,7 +570,6 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
var->data.precision == GLSL_PRECISION_MEDIUM ||
var->data.precision == GLSL_PRECISION_LOW;
assert(intrin->dest.is_ssa);
nir_ssa_def *load =
nir_load_interpolated_input(&state->builder,
intrin->dest.ssa.num_components,
@ -1478,7 +1475,6 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
nir_intrinsic_set_range(load, range);
}
assert(intrin->dest.is_ssa);
load->num_components = num_components;
nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
@ -1812,7 +1808,6 @@ nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def *base_addr,
nir_address_format addr_format)
{
assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
return build_addr_for_var(b, deref->var, addr_format);
@ -1914,7 +1909,6 @@ nir_lower_explicit_io_instr(nir_builder *b,
}
case nir_intrinsic_store_deref: {
assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
if (vec_stride > scalar_size) {
@ -1949,7 +1943,6 @@ nir_lower_explicit_io_instr(nir_builder *b,
}
case nir_intrinsic_store_deref_block_intel: {
assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
const nir_component_mask_t write_mask = 0;
build_explicit_io_store(b, intrin, addr, addr_format,
@ -2093,7 +2086,6 @@ lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def *base_addr = NULL;
if (deref->deref_type != nir_deref_type_var) {
assert(deref->parent.is_ssa);
base_addr = deref->parent.ssa;
}
@ -2110,7 +2102,6 @@ static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
nir_address_format addr_format)
{
assert(intrin->src[0].is_ssa);
nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
@ -2173,7 +2164,6 @@ lower_explicit_io_mode_check(nir_builder *b, nir_intrinsic_instr *intrin,
return;
}
assert(intrin->src[0].is_ssa);
nir_ssa_def *addr = intrin->src[0].ssa;
b->cursor = nir_instr_remove(&intrin->instr);

View file

@ -45,8 +45,6 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
b->cursor = nir_before_instr(&intr->instr);
assert(intr->dest.is_ssa);
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < intr->num_components; i++) {
@ -79,8 +77,6 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
b->cursor = nir_before_instr(&intr->instr);
assert(intr->dest.is_ssa);
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
nir_ssa_def *base_offset = nir_get_io_offset_src(intr)->ssa;
@ -326,8 +322,6 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
{
b->cursor = nir_before_instr(&intr->instr);
assert(intr->dest.is_ssa);
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
nir_variable **chan_vars;

View file

@ -562,7 +562,6 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);
assert(intrin->src[1].is_ssa);
nir_ssa_def *old_value = intrin->src[1].ssa;
nir_ssa_scalar comps[4];
for (unsigned c = 0; c < intrin->num_components; c++) {

View file

@ -40,7 +40,6 @@ dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
nir_src *intrin_offset_src = nir_get_io_offset_src(intrin);
for (unsigned i = 0; i < info->num_srcs; i++) {
assert(intrin->src[i].is_ssa);
if (i == 0 && data != NULL) {
assert(!info->has_dest);
assert(&intrin->src[i] != intrin_offset_src);
@ -59,7 +58,6 @@ dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
nir_intrinsic_set_align(dup, align_mul, align_offset);
if (info->has_dest) {
assert(intrin->dest.is_ssa);
nir_ssa_dest_init(&dup->instr, &dup->dest, num_components, bit_size);
} else {
nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
@ -75,7 +73,6 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
const void *cb_data)
{
assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
const unsigned num_components = intrin->dest.ssa.num_components;
const unsigned bytes_read = num_components * (bit_size / 8);

View file

@ -136,7 +136,6 @@ lower_memcpy_impl(nir_function_impl *impl)
}
} else {
found_non_const_memcpy = true;
assert(cpy->src[2].is_ssa);
nir_ssa_def *size = cpy->src[2].ssa;
/* In this case, we don't have any idea what the size is so we

View file

@ -275,7 +275,6 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
switch (intrin->intrinsic) {
case nir_intrinsic_load_view_index: {
assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, view_index);
break;
}

View file

@ -48,7 +48,6 @@ nu_handle_init(struct nu_handle *h, nir_src *src)
if (nir_src_is_const(deref->arr.index))
return false;
assert(deref->arr.index.is_ssa);
h->handle = deref->arr.index.ssa;
h->parent_deref = parent;

View file

@ -51,7 +51,6 @@ lower_point_size_instr(nir_builder *b, nir_instr *instr, void *data)
b->cursor = nir_before_instr(instr);
assert(intr->src[1].is_ssa);
assert(intr->src[1].ssa->num_components == 1);
nir_ssa_def *psiz = intr->src[1].ssa;

View file

@ -59,7 +59,6 @@ lower_point_smooth(nir_builder *b, nir_instr *instr, UNUSED void *_state)
out_src_idx = 1;
}
assert(intr->src[out_src_idx].is_ssa);
assert(intr->num_components == 4);
b->cursor = nir_before_instr(&intr->instr);

View file

@ -47,7 +47,6 @@ lower_polylinesmooth(nir_builder *b, nir_instr *instr, void *data)
nir_intrinsic_src_type(intr) != nir_type_float32)
return false;
assert(intr->src[0].is_ssa);
assert(intr->num_components == 4);
b->cursor = nir_before_instr(&intr->instr);

View file

@ -137,13 +137,11 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
switch (intrin->intrinsic) {
case nir_intrinsic_image_deref_load: {
assert(intrin->src[1].is_ssa);
nir_ssa_def *coord =
nir_trim_vector(b, intrin->src[1].ssa, coord_components);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
tex->coord_components = coord_components;
assert(intrin->src[3].is_ssa);
nir_ssa_def *lod = intrin->src[3].ssa;
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
@ -155,7 +153,6 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
}
case nir_intrinsic_image_deref_size: {
assert(intrin->src[1].is_ssa);
nir_ssa_def *lod = intrin->src[1].ssa;
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);

View file

@ -39,7 +39,6 @@ lower_tex_src_to_offset(nir_builder *b,
/* We compute first the offsets */
nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
while (deref->deref_type != nir_deref_type_var) {
assert(deref->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(deref->parent.ssa->parent_instr);

View file

@ -914,7 +914,6 @@ rewrite_phis_to_pred(nir_block *block, nir_block *pred)
nir_foreach_phi_src(phi_src, phi) {
if (phi_src->pred == pred) {
found = true;
assert(phi_src->src.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
break;
}

View file

@ -60,7 +60,6 @@ lower_single_sampled_instr(nir_builder *b,
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
b->cursor = nir_before_instr(instr);
assert(intrin->src[0].is_ssa);
lowered = nir_load_deref(b, nir_src_as_deref(intrin->src[0]));
break;

View file

@ -146,7 +146,6 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
static nir_ssa_def *
lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
{
assert(intrin->src[0].is_ssa);
nir_ssa_def *value = intrin->src[0].ssa;
nir_ssa_def *result = NULL;
@ -172,7 +171,6 @@ lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
static nir_ssa_def *
lower_vote_eq(nir_builder *b, nir_intrinsic_instr *intrin)
{
assert(intrin->src[0].is_ssa);
nir_ssa_def *value = intrin->src[0].ssa;
/* We have to implicitly lower to scalar */
@ -244,22 +242,18 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
bool is_shuffle = false;
switch (intrin->intrinsic) {
case nir_intrinsic_shuffle_xor:
assert(intrin->src[1].is_ssa);
index = nir_ixor(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_shuffle_up:
assert(intrin->src[1].is_ssa);
index = nir_isub(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_shuffle_down:
assert(intrin->src[1].is_ssa);
index = nir_iadd(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_quad_broadcast:
assert(intrin->src[1].is_ssa);
index = nir_ior(b, nir_iand_imm(b, index, ~0x3),
intrin->src[1].ssa);
break;
@ -335,8 +329,6 @@ glsl_type_for_ssa(nir_ssa_def *def)
static nir_ssa_def *
lower_shuffle(nir_builder *b, nir_intrinsic_instr *intrin)
{
assert(intrin->src[0].is_ssa);
assert(intrin->src[1].is_ssa);
nir_ssa_def *val = intrin->src[0].ssa;
nir_ssa_def *id = intrin->src[1].ssa;
@ -716,7 +708,6 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
case nir_intrinsic_ballot_bit_count_reduce:
case nir_intrinsic_ballot_find_lsb:
case nir_intrinsic_ballot_find_msb: {
assert(intrin->src[0].is_ssa);
nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
options);
@ -746,7 +737,6 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
switch (intrin->intrinsic) {
case nir_intrinsic_ballot_bitfield_extract: {
assert(intrin->src[1].is_ssa);
nir_ssa_def *idx = intrin->src[1].ssa;
if (int_val->num_components > 1) {
/* idx will be truncated by nir_ushr, so we just need to select
@ -773,7 +763,6 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
case nir_intrinsic_ballot_bit_count_exclusive:
case nir_intrinsic_ballot_bit_count_inclusive: {
assert(intrin->src[0].is_ssa);
nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
options);
if (options->lower_ballot_bit_count_to_mbcnt_amd) {

View file

@ -42,7 +42,6 @@ struct lower_sysval_state {
static nir_ssa_def *
sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
{
assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
if (bit_size == 32)
return NULL;
@ -498,7 +497,6 @@ lower_compute_system_value_instr(nir_builder *b,
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return NULL;
assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
switch (intrin->intrinsic) {

View file

@ -166,7 +166,6 @@ lower_offset(nir_builder *b, nir_tex_instr *tex)
int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
assert(coord_index >= 0);
assert(tex->src[coord_index].src.is_ssa);
nir_ssa_def *coord = tex->src[coord_index].src.ssa;
b->cursor = nir_before_instr(&tex->instr);
@ -301,7 +300,6 @@ static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
const nir_lower_tex_options *options)
{
assert(tex->dest.is_ssa);
assert(nir_tex_instr_dest_size(tex) == 4);
assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
assert(tex->op == nir_texop_tex);
@ -660,7 +658,6 @@ lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
{
assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
assert(tex->op == nir_texop_txd);
assert(tex->dest.is_ssa);
/* Use textureSize() to get the width and height of LOD 0 */
nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
@ -813,7 +810,6 @@ lower_gradient(nir_builder *b, nir_tex_instr *tex)
assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
assert(tex->op == nir_texop_txd);
assert(tex->dest.is_ssa);
/* Use textureSize() to get the width and height of LOD 0 */
unsigned component_mask;
@ -1015,8 +1011,6 @@ get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
static void
swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
{
assert(tex->dest.is_ssa);
b->cursor = nir_after_instr(&tex->instr);
assert(nir_tex_instr_dest_size(tex) == 4);
@ -1030,8 +1024,6 @@ swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
assert(tex->dest.is_ssa);
b->cursor = nir_after_instr(&tex->instr);
nir_ssa_def *swizzled;
@ -1070,7 +1062,6 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
static void
linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
{
assert(tex->dest.is_ssa);
assert(nir_tex_instr_dest_size(tex) == 4);
assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
@ -1299,7 +1290,6 @@ nir_lower_txs_cube_array(nir_builder *b, nir_tex_instr *tex)
b->cursor = nir_after_instr(&tex->instr);
assert(tex->dest.is_ssa);
assert(tex->dest.ssa.num_components == 3);
nir_ssa_def *size = &tex->dest.ssa;
size = nir_vec3(b, nir_channel(b, size, 1),
@ -1440,7 +1430,6 @@ lower_index_to_offset(nir_builder *b, nir_tex_instr *tex)
if ((*index) == 0)
continue;
assert(tex->src[i].src.is_ssa);
nir_ssa_def *sum = nir_iadd_imm(b, tex->src[i].src.ssa, *index);
nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
nir_src_for_ssa(sum));

View file

@ -164,7 +164,6 @@ nir_lower_two_sided_color_instr(nir_builder *b, nir_instr *instr, void *data)
}
nir_ssa_def *color = nir_bcsel(b, face, front, back);
assert(intr->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, color);
return true;

View file

@ -603,8 +603,6 @@ rename_variables(struct lower_variables_state *state)
for (unsigned i = intrin->num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
mov->src[0].swizzle[i] = 0;
assert(intrin->dest.is_ssa);
mov->dest.write_mask = (1 << intrin->num_components) - 1;
nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
intrin->num_components,
@ -630,7 +628,6 @@ rename_variables(struct lower_variables_state *state)
/* Should have been removed before rename_variables(). */
assert(node != UNDEF_NODE);
assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
if (!node->lower_to_ssa)

View file

@ -54,7 +54,6 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
if (!nir_deref_mode_is_in_set(deref, modes))
break;
assert(intrin->dest.is_ssa);
intrin->num_components = 4;
intrin->dest.ssa.num_components = 4;
@ -74,7 +73,6 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
if (!nir_deref_mode_is_in_set(deref, modes))
break;
assert(intrin->src[1].is_ssa);
nir_ssa_def *data = intrin->src[1].ssa;
b->cursor = nir_before_instr(&intrin->instr);

View file

@ -30,7 +30,6 @@ insert_store(nir_builder *b, nir_ssa_def *reg, nir_alu_instr *vec,
unsigned start_idx)
{
assert(start_idx < nir_op_infos[vec->op].num_inputs);
assert(vec->src[start_idx].src.is_ssa);
nir_ssa_def *src = vec->src[start_idx].src.ssa;
unsigned num_components = nir_dest_num_components(vec->dest.dest);
@ -74,7 +73,6 @@ try_coalesce(nir_builder *b, nir_ssa_def *reg, nir_alu_instr *vec,
unsigned start_idx, struct data *data)
{
assert(start_idx < nir_op_infos[vec->op].num_inputs);
assert(vec->src[start_idx].src.is_ssa);
/* If we are going to do a reswizzle, then the vecN operation must be the
* only use of the source value.
@ -201,7 +199,6 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
if (vec->op == nir_op_mov || !nir_op_is_vec(vec->op))
return false;
assert(vec->dest.dest.is_ssa);
unsigned num_components = nir_dest_num_components(vec->dest.dest);
/* Special case: if all sources are the same, just swizzle instead to avoid
@ -237,7 +234,6 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
for (unsigned i = 0; i < num_components; ++i) {
assert(vec->src[i].src.is_ssa);
swiz[i] = vec->src[i].swizzle[0];
}

View file

@ -49,8 +49,6 @@ update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
{
nir_ssa_def *wpos = &intr->dest.ssa;
assert(intr->dest.is_ssa);
b->cursor = nir_after_instr(&intr->instr);
nir_ssa_def *spos = nir_load_sample_pos_or_center(b);

View file

@ -75,7 +75,6 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
nir_builder *b = &state->b;
nir_ssa_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
assert(intr->dest.is_ssa);
wpos_input = &intr->dest.ssa;
b->cursor = nir_after_instr(&intr->instr);

View file

@ -131,7 +131,6 @@ combine_stores(struct combine_stores_state *state,
nir_intrinsic_instr *store = combo->stores[i];
if (combo->write_mask & (1 << i)) {
assert(store);
assert(store->src[1].is_ssa);
/* If store->num_components == 1 then we are in the deref-of-vec case
* and store->src[1] is a scalar. Otherwise, we're a regular vector

View file

@ -80,7 +80,6 @@ nir_opt_conditional_discard_block(nir_builder *b, nir_block *block)
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_intrinsic_op op = intrin->intrinsic;
assert(if_stmt->condition.is_ssa);
nir_ssa_def *cond = if_stmt->condition.ssa;
b->cursor = nir_before_cf_node(prev_node);
@ -97,7 +96,6 @@ nir_opt_conditional_discard_block(nir_builder *b, nir_block *block)
case nir_intrinsic_discard_if:
case nir_intrinsic_demote_if:
case nir_intrinsic_terminate_if:
assert(intrin->src[0].is_ssa);
cond = nir_iand(b, cond, intrin->src[0].ssa);
break;
default:

View file

@ -105,12 +105,10 @@ opt_constant_if(nir_if *if_stmt, bool condition)
if (phi_src->pred != last_block)
continue;
assert(phi_src->src.is_ssa);
def = phi_src->src.ssa;
}
assert(def);
assert(phi->dest.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
nir_instr_remove(&phi->instr);
}

View file

@ -161,7 +161,6 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_instr *instr, void *user_data)
alu->op != nir_op_irem)
return false;
assert(alu->dest.dest.is_ssa);
assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);
if (alu->dest.dest.ssa.bit_size < *min_bit_size)

View file

@ -1190,7 +1190,6 @@ clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
nalu->dest.write_mask = alu->dest.write_mask;
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
assert(alu->src[i].src.is_ssa);
nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
nalu->src[i].negate = alu->src[i].negate;
nalu->src[i].abs = alu->src[i].abs;
@ -1336,7 +1335,6 @@ opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
bool progress = false;
/* Evaluate any uses of the if condition inside the if branches */
assert(nif->condition.is_ssa);
nir_foreach_use_including_if_safe(use_src, nif->condition.ssa) {
if (!(use_src->is_if && use_src->parent_if == nif))
progress |= evaluate_condition_use(b, nif, use_src);

View file

@ -45,9 +45,6 @@ src_is_single_use_shuffle(nir_src src, nir_ssa_def **data, nir_ssa_def **index)
if (nir_ssa_def_used_by_if(&shuffle->dest.ssa))
return false;
assert(shuffle->src[0].is_ssa);
assert(shuffle->src[1].is_ssa);
*data = shuffle->src[0].ssa;
*index = shuffle->src[1].ssa;

View file

@ -456,7 +456,6 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
assert(exec_list_length(&phi->srcs) == 2);
nir_foreach_phi_src(src, phi) {
assert(src->pred == then_block || src->pred == else_block);
assert(src->src.is_ssa);
unsigned idx = src->pred == then_block ? 1 : 2;
nir_src_copy(&sel->src[idx].src, &src->src, &sel->instr);

View file

@ -196,8 +196,6 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
{
nir_op op = INVALID_OP;
assert(phi->dest.is_ssa);
/* If the phi has already been narrowed, nothing more to do: */
if (phi->dest.ssa.bit_size != 32)
return false;
@ -231,8 +229,6 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
/* Push the conversion into the new phi sources: */
nir_foreach_phi_src (src, phi) {
assert(src->src.is_ssa);
/* insert new conversion instr in block of original phi src: */
b->cursor = nir_after_instr_and_phis(src->src.ssa->parent_instr);
nir_ssa_def *old_src = src->src.ssa;
@ -306,8 +302,6 @@ find_widening_op(nir_phi_instr *phi, unsigned *bit_size)
*bit_size = 0;
nir_foreach_phi_src (src, phi) {
assert(src->src.is_ssa);
nir_instr *instr = src->src.ssa->parent_instr;
if (instr->type == nir_instr_type_load_const) {
has_load_const = true;
@ -344,8 +338,6 @@ find_widening_op(nir_phi_instr *phi, unsigned *bit_size)
* sequence to make the rest of the transformation possible:
*/
nir_foreach_phi_src (src, phi) {
assert(src->src.is_ssa);
nir_instr *instr = src->src.ssa->parent_instr;
if (instr->type != nir_instr_type_load_const)
continue;
@ -363,8 +355,6 @@ find_widening_op(nir_phi_instr *phi, unsigned *bit_size)
static bool
try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
{
assert(phi->dest.is_ssa);
/* If the phi has already been narrowed, nothing more to do: */
if (phi->dest.ssa.bit_size != 32)
return false;
@ -382,8 +372,6 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
/* Remove the widening conversions from the phi sources: */
nir_foreach_phi_src (src, phi) {
assert(src->src.is_ssa);
nir_instr *instr = src->src.ssa->parent_instr;
nir_ssa_def *new_src;

View file

@ -74,8 +74,6 @@ remove_phis_block(nir_block *block, nir_builder *b)
bool srcs_same = true;
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
/* For phi nodes at the beginning of loops, we may encounter some
* sources from backedges that point back to the destination of the
* same phi, i.e. something like:
@ -126,7 +124,6 @@ remove_phis_block(nir_block *block, nir_builder *b)
def = nir_mov_alu(b, mov->src[0], def->num_components);
}
assert(phi->dest.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
nir_instr_remove(&phi->instr);

View file

@ -41,8 +41,6 @@ opt_undef_csel(nir_alu_instr *instr)
if (!nir_op_is_selection(instr->op))
return false;
assert(instr->dest.dest.is_ssa);
for (int i = 1; i <= 2; i++) {
if (!instr->src[i].src.is_ssa)
continue;
@ -80,8 +78,6 @@ opt_undef_vecN(nir_builder *b, nir_alu_instr *alu)
if (!nir_op_is_vec(alu->op))
return false;
assert(alu->dest.dest.is_ssa);
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
if (!alu->src[i].src.is_ssa ||
alu->src[i].src.ssa->parent_instr->type != nir_instr_type_ssa_undef)

View file

@ -862,7 +862,6 @@ print_deref_link(const nir_deref_instr *instr, bool whole_chain, print_state *st
return;
}
assert(instr->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(instr->parent.ssa->parent_instr);

View file

@ -2106,7 +2106,6 @@ ssa_def_bits_used(const nir_ssa_def *def, int recur)
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
if (src_idx == 0) {
assert(use_intrin->dest.is_ssa);
bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
} else {
if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) {

View file

@ -234,7 +234,6 @@ static void
nir_schedule_load_reg_deps(nir_intrinsic_instr *load,
nir_deps_state *state)
{
assert(load->src[0].is_ssa);
nir_ssa_def *reg = load->src[0].ssa;
(void)nir_reg_get_decl(reg);
@ -253,7 +252,6 @@ static void
nir_schedule_store_reg_deps(nir_intrinsic_instr *store,
nir_deps_state *state)
{
assert(store->src[1].is_ssa);
nir_ssa_def *reg = store->src[1].ssa;
(void)nir_reg_get_decl(reg);
@ -554,7 +552,6 @@ nir_schedule_regs_freed_load_reg(nir_intrinsic_instr *load,
nir_schedule_regs_freed_state *state)
{
assert(nir_is_load_reg(load));
assert(load->src[0].is_ssa);
if (load->intrinsic == nir_intrinsic_load_reg_indirect)
nir_schedule_regs_freed_src_cb(&load->src[1], state);
@ -568,7 +565,6 @@ nir_schedule_regs_freed_load_reg(nir_intrinsic_instr *load,
state->regs_freed += nir_schedule_reg_pressure(reg);
}
assert(load->dest.is_ssa);
nir_schedule_regs_freed_def_cb(&load->dest.ssa, state);
}
@ -950,7 +946,6 @@ nir_schedule_mark_load_reg_scheduled(nir_intrinsic_instr *load,
nir_schedule_scoreboard *scoreboard)
{
assert(nir_is_load_reg(load));
assert(load->src[0].is_ssa);
nir_ssa_def *reg = load->src[0].ssa;
if (load->intrinsic == nir_intrinsic_load_reg_indirect)
@ -959,7 +954,6 @@ nir_schedule_mark_load_reg_scheduled(nir_intrinsic_instr *load,
nir_schedule_mark_use(scoreboard, reg, &load->instr,
nir_schedule_reg_pressure(reg));
assert(load->dest.is_ssa);
nir_schedule_mark_def_scheduled(&load->dest.ssa, scoreboard);
}

View file

@ -229,15 +229,6 @@ match_value(const nir_algebraic_table *table,
{
uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];
/* Searching only works on SSA values because, if it's not SSA, we can't
* know if the value changed between one instance of that value in the
* expression and another. Also, the replace operation will place reads of
* that value right before the last instruction in the expression we're
* replacing so those reads will happen after the original reads and may
* not be valid if they're register reads.
*/
assert(instr->src[src].src.is_ssa);
/* If the source is an explicitly sized source, then we need to reset
* both the number of components and the swizzle.
*/
@ -370,8 +361,6 @@ match_expression(const nir_algebraic_table *table, const nir_search_expression *
if (!nir_op_matches_search_op(instr->op, expr->opcode))
return false;
assert(instr->dest.dest.is_ssa);
if (expr->value.bit_size > 0 &&
instr->dest.dest.ssa.bit_size != expr->value.bit_size)
return false;
@ -665,8 +654,6 @@ nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
swizzle[i] = i;
assert(instr->dest.dest.is_ssa);
struct match_state state;
state.inexact_match = false;
state.has_exact_alu = false;

View file

@ -734,8 +734,6 @@ write_alu(write_ctx *ctx, const nir_alu_instr *alu)
header.alu.op = alu->op;
header.alu.packed_src_ssa_16bit = is_alu_src_ssa_16bit(ctx, alu);
assert(alu->dest.dest.is_ssa);
if (header.alu.packed_src_ssa_16bit) {
/* For packed srcs of SSA ALUs, this field stores the swizzles. */
header.alu.writemask_or_two_swizzles = alu->src[0].swizzle[0];
@ -747,7 +745,6 @@ write_alu(write_ctx *ctx, const nir_alu_instr *alu)
if (header.alu.packed_src_ssa_16bit) {
for (unsigned i = 0; i < num_srcs; i++) {
assert(alu->src[i].src.is_ssa);
unsigned idx = write_lookup_object(ctx, alu->src[i].src.ssa);
assert(idx < (1 << 16));
blob_write_uint16(ctx->blob, idx);
@ -1045,7 +1042,6 @@ read_deref(read_ctx *ctx, union packed_instr header)
} else if (deref->deref_type == nir_deref_type_cast) {
deref->modes = decode_deref_modes(header.deref.modes);
} else {
assert(deref->parent.is_ssa);
deref->modes = nir_instr_as_deref(deref->parent.ssa->parent_instr)->modes;
}
@ -1483,7 +1479,6 @@ write_phi(write_ctx *ctx, const nir_phi_instr *phi)
write_dest(ctx, &phi->dest, header, phi->instr.type);
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
size_t blob_offset = blob_reserve_uint32(ctx->blob);
ASSERTED size_t blob_offset2 = blob_reserve_uint32(ctx->blob);
assert(blob_offset + sizeof(uint32_t) == blob_offset2);

View file

@ -1174,8 +1174,6 @@ src_is_load_deref(nir_src src, nir_src deref_src)
if (load == NULL || load->intrinsic != nir_intrinsic_load_deref)
return false;
assert(load->src[0].is_ssa);
return load->src[0].ssa == deref_src.ssa;
}
@ -1195,7 +1193,6 @@ get_non_self_referential_store_comps(nir_intrinsic_instr *store)
{
nir_component_mask_t comps = nir_intrinsic_write_mask(store);
assert(store->src[1].is_ssa);
nir_instr *src_instr = store->src[1].ssa->parent_instr;
if (src_instr->type != nir_instr_type_alu)
return comps;

View file

@ -1975,7 +1975,6 @@ vec_src_comp_as_int(nir_src src, unsigned comp)
if (nir_src_is_const(src))
return nir_src_comp_as_int(src, comp);
assert(src.is_ssa);
nir_ssa_scalar s = { src.ssa, comp };
assert(nir_op_is_vec(nir_ssa_scalar_alu_op(s)));
return nir_ssa_scalar_as_int(nir_ssa_scalar_chase_alu_src(s, comp));

View file

@ -70,7 +70,6 @@ static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
int32_t direction, int32_t shift)
{
assert(alu_instr->src[1].src.is_ssa);
nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
/* Only propagate if the shift is a const value so we can check value range
@ -201,7 +200,6 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
/* 'offset_src_idx' holds the index of the source that represent the offset. */
new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
assert(intrinsic->src[offset_src_idx].is_ssa);
nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
/* Since we don't have value range checking, we first try to propagate
@ -221,7 +219,6 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
*target_src = nir_src_for_ssa(offset);
if (has_dest) {
assert(intrinsic->dest.is_ssa);
nir_ssa_def *dest = &intrinsic->dest.ssa;
nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
dest->num_components, dest->bit_size);

View file

@ -193,7 +193,6 @@ lower_tex_prefetch_block(nir_block *block)
int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
/* First source should be the sampling coordinate. */
nir_tex_src *coord = &tex->src[idx];
assert(coord->src.is_ssa);
if (ir3_nir_coord_offset(coord->src.ssa) >= 0) {
tex->op = nir_texop_tex_prefetch;

View file

@ -169,8 +169,6 @@ move_varying_inputs_block(state *state, nir_block *block)
continue;
}
assert(intr->dest.is_ssa);
move_instruction_to_start_block(state, instr);
progress = true;

View file

@ -49,7 +49,6 @@ lower_multiview_mask(nir_shader *nir, uint32_t *mask)
if (var->data.location != VARYING_SLOT_POS)
continue;
assert(intrin->src[1].is_ssa);
nir_ssa_def *orig_src = intrin->src[1].ssa;
b.cursor = nir_before_instr(instr);

View file

@ -2341,7 +2341,6 @@ visit_intrinsic(struct lp_build_nir_context *bld_base,
break;
}
if (result[0]) {
assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa, result);
}
}
@ -2388,7 +2387,6 @@ visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
params.resource = resource;
bld_base->tex_size(bld_base, &params);
assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa,
&sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}
@ -2699,7 +2697,6 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
}
}
assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa, texel);
}

View file

@ -3594,8 +3594,6 @@ nir_to_tgsi_lower_tex_instr_arg(nir_builder *b,
if (tex_src < 0)
return;
assert(instr->src[tex_src].src.is_ssa);
nir_ssa_def *def = instr->src[tex_src].src.ssa;
for (int i = 0; i < def->num_components; i++) {
s->channels[s->i++] = nir_get_ssa_scalar(def, i);

View file

@ -669,8 +669,6 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
b.cursor = nir_before_instr(instr);
assert(load->src[0].is_ssa);
if (load->src[0].ssa == temp_ubo_name) {
nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],

View file

@ -137,7 +137,6 @@ lower_pos_read(nir_builder *b, struct nir_instr *instr,
pos = nir_vector_insert_imm(b, pos, depth, 2);
assert(intr->dest.is_ssa);
nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, pos,
pos->parent_instr);
}

View file

@ -611,7 +611,6 @@ emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr * intr)
} break;
case nir_intrinsic_load_front_face:
case nir_intrinsic_load_frag_coord:
assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_instance_id:

View file

@ -408,7 +408,6 @@ make_src_noconst(struct ir2_context *ctx, nir_src src)
struct ir2_instr *instr;
if (nir_src_as_const_value(src)) {
assert(src.is_ssa);
instr = instr_create_alu(ctx, nir_op_mov, src.ssa->num_components);
instr->src[0] = make_src(ctx, src);
return ir2_src(instr->idx, 0, IR2_SRC_SSA);

View file

@ -706,8 +706,6 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
b.cursor = nir_before_instr(instr);
assert(load->src[0].is_ssa);
if (load->src[0].ssa == temp_ubo_name) {
nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],

View file

@ -251,8 +251,6 @@ static bool gpir_emit_intrinsic(gpir_block *block, nir_instr *ni)
{
gpir_node *child = gpir_node_find(block, &instr->src[0], 0);
assert(child);
assert(instr->src[0].is_ssa);
assert(instr->src[1].is_ssa);
register_node_reg(block, child, instr->src[1].ssa->index);
return true;
}

View file

@ -42,11 +42,8 @@ clone_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
nir_intrinsic_instr *new_intrin =
nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));
assert(new_intrin->dest.is_ssa);
unsigned num_srcs = nir_intrinsic_infos[new_intrin->intrinsic].num_srcs;
for (unsigned i = 0; i < num_srcs; i++) {
assert(new_intrin->src[i].is_ssa);
}
nir_builder_instr_insert(b, &new_intrin->instr);

View file

@ -178,7 +178,6 @@ static bool ppir_emit_alu(ppir_block *block, nir_instr *ni)
/* Add parent node as a the folded dest ssa node to keep
* the dependency chain */
nir_alu_src *ns = &instr->src[0];
assert(ns->src.is_ssa);
ppir_node *parent = block->comp->var_nodes[ns->src.ssa->index];
assert(parent);
block->comp->var_nodes[dst->ssa.index] = parent;
@ -321,7 +320,6 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
case nir_intrinsic_load_frag_coord:
case nir_intrinsic_load_point_coord:
case nir_intrinsic_load_front_face: {
assert(instr->dest.is_ssa);
mask = u_bit_consecutive(0, instr->num_components);
ppir_op op;

View file

@ -407,7 +407,6 @@ r600_lower_deref_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
d = nir_deref_instr_parent(d)) {
assert(d->deref_type == nir_deref_type_array);
assert(d->arr.index.is_ssa);
unsigned array_stride = 1;
if (glsl_type_is_array(d->type))

View file

@ -121,7 +121,6 @@ LowerLoad64Uniform::lower(nir_instr *instr)
auto intr = nir_instr_as_intrinsic(instr);
int old_components = nir_dest_num_components(intr->dest);
assert(old_components <= 2);
assert(intr->dest.is_ssa);
intr->dest.ssa.num_components *= 2;
intr->dest.ssa.bit_size = 32;
intr->num_components *= 2;

View file

@ -212,8 +212,6 @@ update_alu_mask(nir_src *src, void *data)
static uint32_t
get_dest_usee_mask(nir_intrinsic_instr *op)
{
assert(op->dest.is_ssa);
MaskQuery mq = {0};
mq.full_mask = (1 << nir_dest_num_components(op->dest)) - 1;

View file

@ -146,8 +146,6 @@ r600_create_new_load(nir_builder *b,
b->cursor = nir_before_instr(&intr->instr);
assert(intr->dest.is_ssa);
nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, num_comps,
intr->dest.ssa.bit_size);

View file

@ -170,7 +170,6 @@ ValueFactory::allocate_pinned_vec4(int sel, bool is_ssa)
void
ValueFactory::inject_value(const nir_dest& dest, int chan, PVirtualValue value)
{
assert(dest.is_ssa);
RegisterKey key(dest.ssa.index, chan, vp_ssa);
sfn_log << SfnLog::reg << "Inject value with key " << key << "\n";
assert(m_values.find(key) == m_values.end());
@ -211,7 +210,6 @@ public:
PRegister
ValueFactory::dest(const nir_dest& dst, int chan, Pin pin_channel, uint8_t chan_mask)
{
assert(dst.is_ssa);
return dest(dst.ssa, chan, pin_channel, chan_mask);
}
@ -283,7 +281,6 @@ ValueFactory::dest_vec4(const nir_dest& dst, Pin pin)
{
if (pin != pin_group && pin != pin_chgr)
pin = pin_chan;
assert(dst.is_ssa);
PRegister x = dest(dst, 0, pin);
PRegister y = dest(dst, 1, pin);
PRegister z = dest(dst, 2, pin);
@ -331,7 +328,6 @@ ValueFactory::src(const nir_src& src, int chan)
{
sfn_log << SfnLog::reg << "search (ref) " << (void *)&src << "\n";
assert(src.is_ssa);
sfn_log << SfnLog::reg << "search ssa " << src.ssa->index << " c:" << chan
<< " got ";
auto val = ssa_src(*src.ssa, chan);

View file

@ -51,8 +51,6 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
nir_ssa_def *coord = NULL, *sample_index = NULL;
for (int i = 0; i < txf_ms->num_srcs; i++) {
assert(txf_ms->src[i].src.is_ssa);
switch (txf_ms->src[i].src_type) {
case nir_tex_src_coord:
coord = txf_ms->src[i].src.ssa;

View file

@ -257,7 +257,6 @@ ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
struct hash_entry *entry;
assert(src.is_ssa);
nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
if (load == NULL) {
entry = _mesa_hash_table_search(c->def_ht, src.ssa);
@ -830,7 +829,6 @@ ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
/* If packing from a vec4 op (as expected), identify it so that we can
* peek back at what generated its sources.
*/
assert(instr->src[0].src.is_ssa);
if (instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
nir_op_vec4) {
@ -997,7 +995,6 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
struct qreg *src)
{
assert(instr->src[0].src.is_ssa);
if (nir_load_reg_for_def(instr->src[0].src.ssa))
goto out;
if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)

View file

@ -3451,11 +3451,9 @@ emit_is_sparse_texels_resident(struct ntv_context *ctx, nir_intrinsic_instr *int
SpvId type = get_dest_type(ctx, &intr->dest, nir_type_uint);
/* this will always be stored with the ssa index of the parent instr */
assert(intr->src[0].is_ssa);
nir_ssa_def *ssa = intr->src[0].ssa;
assert(ssa->parent_instr->type == nir_instr_type_alu);
nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
assert(alu->src[0].src.is_ssa);
unsigned index = alu->src[0].src.ssa->index;
assert(index < ctx->num_defs);
assert(ctx->resident_defs[index] != 0);

View file

@ -433,7 +433,6 @@ replicate_derefs(nir_builder *b, nir_deref_instr *old, nir_deref_instr *new)
case nir_deref_type_var:
return new;
case nir_deref_type_array:
assert(old->arr.index.is_ssa);
return nir_build_deref_array(b, replicate_derefs(b, parent, new), old->arr.index.ssa);
case nir_deref_type_struct:
return nir_build_deref_struct(b, replicate_derefs(b, parent, new), old->strct.index);
@ -458,7 +457,6 @@ lower_pv_mode_gs_store(nir_builder *b,
gl_varying_slot location = var->data.location;
unsigned location_frac = var->data.location_frac;
assert(state->varyings[location][location_frac]);
assert(intrin->src[1].is_ssa);
nir_ssa_def *pos_counter = nir_load_var(b, state->pos_counter);
nir_ssa_def *index = lower_pv_mode_gs_ring_index(b, state, pos_counter);
nir_deref_instr *varying_deref = nir_build_deref_var(b, state->varyings[location][location_frac]);
@ -889,7 +887,6 @@ lower_line_smooth_gs_store(nir_builder *b,
unsigned location_frac = var->data.location_frac;
if (location != VARYING_SLOT_POS) {
assert(state->varyings[location]);
assert(intrin->src[1].is_ssa);
nir_store_var(b, state->varyings[location][location_frac],
intrin->src[1].ssa,
nir_intrinsic_write_mask(intrin));
@ -1713,7 +1710,6 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data)
if (nir_src_is_const(lod_src) && nir_src_as_const_value(lod_src)->u32 == 0)
return false;
assert(lod_src.is_ssa);
nir_ssa_def *lod = lod_src.ssa;
int offset_idx = nir_tex_instr_src_index(txf, nir_tex_src_texture_offset);

View file

@ -23,8 +23,6 @@ rusticl_lower_intrinsics_instr(
switch (intrins->intrinsic) {
case nir_intrinsic_image_deref_format:
case nir_intrinsic_image_deref_order: {
assert(intrins->src[0].is_ssa);
int32_t offset;
nir_deref_instr *deref;
nir_ssa_def *val;

View file

@ -59,7 +59,6 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,
/* Scalarize the load_global_constant. */
b->cursor = nir_before_instr(&intr->instr);
assert(intr->dest.is_ssa);
assert(intr->num_components > 1);
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];

View file

@ -496,7 +496,6 @@ bool
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
const fs_reg &result)
{
assert(instr->src[0].src.is_ssa);
if (!instr->src[0].src.ssa->parent_instr)
return false;
@ -982,13 +981,11 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
fs_reg temp = result;
bool need_extra_copy = false;
assert(instr->dest.dest.is_ssa);
nir_intrinsic_instr *store_reg =
nir_store_reg_for_def(&instr->dest.dest.ssa);
if (store_reg != NULL) {
nir_ssa_def *dest_reg = store_reg->src[1].ssa;
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
assert(instr->src[i].src.is_ssa);
nir_intrinsic_instr *load_reg =
nir_load_reg_for_def(instr->src[i].src.ssa);
if (load_reg == NULL)
@ -1977,16 +1974,12 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld,
bool
fs_visitor::get_nir_src_bindless(const nir_src &src)
{
assert(src.is_ssa);
return nir_ssa_bind_infos[src.ssa->index].bindless;
}
unsigned
fs_visitor::get_nir_src_block(const nir_src &src)
{
assert(src.is_ssa);
return nir_ssa_bind_infos[src.ssa->index].valid ?
nir_ssa_bind_infos[src.ssa->index].block :
UINT32_MAX;
@ -1995,7 +1988,6 @@ fs_visitor::get_nir_src_block(const nir_src &src)
static bool
is_resource_src(nir_src src)
{
assert(src.is_ssa);
return src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
nir_instr_as_intrinsic(src.ssa->parent_instr)->intrinsic == nir_intrinsic_resource_intel;
}
@ -2011,7 +2003,6 @@ fs_visitor::get_resource_nir_src(const nir_src &src)
fs_reg
fs_visitor::get_nir_src(const nir_src &src)
{
assert(src.is_ssa);
nir_intrinsic_instr *load_reg = nir_load_reg_for_def(src.ssa);
fs_reg reg;
@ -2067,7 +2058,6 @@ fs_visitor::get_nir_src_imm(const nir_src &src)
fs_reg
fs_visitor::get_nir_dest(const nir_dest &dest)
{
assert(dest.is_ssa);
nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&dest.ssa);
if (!store_reg) {
const brw_reg_type reg_type =
@ -2092,7 +2082,6 @@ fs_visitor::get_nir_dest(const nir_dest &dest)
nir_component_mask_t
fs_visitor::get_nir_write_mask(const nir_alu_dest &dest)
{
assert(dest.dest.is_ssa);
assert(dest.write_mask == nir_component_mask(dest.dest.ssa.num_components));
nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&dest.dest.ssa);
@ -6711,7 +6700,6 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
const unsigned dest_size = nir_tex_instr_dest_size(instr);
if (devinfo->ver >= 9 &&
instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
assert(instr->dest.is_ssa);
unsigned write_mask = nir_ssa_def_components_read(&instr->dest.ssa);
assert(write_mask != 0); /* dead code should have been eliminated */
if (instr->is_sparse) {

View file

@ -41,10 +41,8 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
nir_ssa_def *src = NULL, *dest = NULL;
if (write) {
assert(intr->src[0].is_ssa);
assert(intr->num_components == intr->src[0].ssa->num_components);
} else {
assert(intr->dest.is_ssa);
assert(intr->num_components == intr->dest.ssa.num_components);
}
@ -764,13 +762,11 @@ lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
* 32-bit and so the bit size of the instruction is given by the
* source.
*/
assert(alu->src[0].src.is_ssa);
return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
default:
break;
}
assert(alu->dest.dest.is_ssa);
if (alu->dest.dest.ssa.bit_size >= 32)
return 0;
@ -2056,8 +2052,6 @@ brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
nir_ssa_def *base_addr, unsigned off)
{
assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
assert(load_uniform->dest.is_ssa);
assert(load_uniform->src[0].is_ssa);
unsigned bit_size = load_uniform->dest.ssa.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);

View file

@ -114,7 +114,6 @@ lower_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
return false;
nir_alu_instr *alu = nir_instr_as_alu(instr);
assert(alu->dest.dest.is_ssa);
if (!nir_op_infos[alu->op].is_conversion)
return false;

View file

@ -72,7 +72,6 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
b->cursor = is_store ? nir_before_instr(instr) : nir_after_instr(instr);
if (is_store) {
assert(intrin->src[0].is_ssa);
nir_ssa_def *bit_field = intrin->src[0].ssa;
nir_ssa_def *fp16_x =
nir_i2f16(b,

View file

@ -48,7 +48,6 @@ are_all_uses_fadd(nir_ssa_def *def)
case nir_op_mov:
case nir_op_fneg:
case nir_op_fabs:
assert(use_alu->dest.dest.is_ssa);
if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
return false;
break;
@ -167,7 +166,6 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
if (add->op != nir_op_fadd)
return false;
assert(add->dest.dest.is_ssa);
if (add->exact)
return false;
@ -237,8 +235,6 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
}
nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);
assert(add->dest.dest.is_ssa);
nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
add->dest.dest.ssa.num_components, bit_size);
nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);

View file

@ -28,7 +28,6 @@ static bool
resize_deref(nir_builder *b, nir_deref_instr *deref,
unsigned num_components, unsigned bit_size)
{
assert(deref->dest.is_ssa);
if (deref->dest.ssa.num_components == num_components &&
deref->dest.ssa.bit_size == bit_size)
return false;
@ -38,7 +37,6 @@ resize_deref(nir_builder *b, nir_deref_instr *deref,
(deref->deref_type == nir_deref_type_array ||
deref->deref_type == nir_deref_type_ptr_as_array)) {
b->cursor = nir_before_instr(&deref->instr);
assert(deref->arr.index.is_ssa);
nir_ssa_def *idx;
if (nir_src_is_const(deref->arr.index)) {
idx = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index), bit_size);
@ -522,7 +520,6 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
b.cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *global_arg_addr =
load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
global_arg_addr);
nir_instr_remove(instr);

Some files were not shown because too many files have changed in this diff Show more