treewide: sed out more is_ssa

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24432>
This commit is contained in:
Alyssa Rosenzweig 2023-08-01 12:07:25 -04:00 committed by Marge Bot
parent a8013644a1
commit 95e3df39c0
48 changed files with 7 additions and 98 deletions

View file

@ -462,7 +462,6 @@ agx_nir_ssa_index(nir_ssa_def *ssa)
static inline agx_index
agx_src_index(nir_src *src)
{
assert(src->is_ssa);
return agx_nir_ssa_index(src->ssa);
}

View file

@ -308,7 +308,6 @@ ntq_add_pending_tmu_flush(struct v3d_compile *c,
if (num_components > 0) {
c->tmu.output_fifo_size += num_components;
assert(dest->is_ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
if (store != NULL) {
nir_ssa_def *reg = store->src[1].ssa;
@ -775,7 +774,6 @@ ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
assert(result.file == QFILE_TEMP && last_inst &&
(last_inst == c->defs[result.index] || is_reused_uniform));
assert(dest->is_ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
if (store == NULL) {
assert(chan < dest->ssa.num_components);

View file

@ -1763,7 +1763,6 @@ nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
nir_builder_instr_insert(&b, instr);
if (dest) {
assert(dest->is_ssa);
this->result = &dest->ssa;
}
}

View file

@ -1759,7 +1759,7 @@ get_store_value(nir_intrinsic_instr *intrin)
nir_component_mask_t
nir_src_components_read(const nir_src *src)
{
assert(src->is_ssa && src->parent_instr);
assert(src->parent_instr);
if (src->parent_instr->type == nir_instr_type_alu) {
nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
@ -2168,13 +2168,6 @@ cursor_next_instr(nir_cursor cursor)
unreachable("Inavlid cursor option");
}
ASSERTED static bool
dest_is_ssa(nir_dest *dest, void *_state)
{
(void) _state;
return dest->is_ssa;
}
bool
nir_function_impl_lower_instructions(nir_function_impl *impl,
nir_instr_filter_cb filter,
@ -2195,7 +2188,6 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
continue;
}
assert(nir_foreach_dest(instr, dest_is_ssa, NULL));
nir_ssa_def *old_def = nir_instr_ssa_def(instr);
struct list_head old_uses;
if (old_def != NULL) {

View file

@ -199,7 +199,6 @@ static void
__clone_src(clone_state *state, void *ninstr_or_if,
nir_src *nsrc, const nir_src *src)
{
assert(src->is_ssa);
nsrc->is_ssa = true;
nsrc->ssa = remap_local(state, src->ssa);
}
@ -208,7 +207,6 @@ static void
__clone_dst(clone_state *state, nir_instr *ninstr,
nir_dest *ndst, const nir_dest *dst)
{
assert(dst->is_ssa);
ndst->is_ssa = true;
nir_ssa_dest_init(ninstr, ndst, dst->ssa.num_components,
dst->ssa.bit_size);

View file

@ -664,7 +664,6 @@ rewrite_src(nir_src *src, void *void_state)
{
struct from_ssa_state *state = void_state;
assert(src->is_ssa);
nir_ssa_def *reg = reg_for_ssa_def(src->ssa, state);
if (reg == NULL)
return true;

View file

@ -30,7 +30,6 @@
static bool
src_is_invocation_id(const nir_src *src)
{
assert(src->is_ssa);
nir_ssa_scalar s = nir_ssa_scalar_resolved(src->ssa, 0);
return s.def->parent_instr->type == nir_instr_type_intrinsic &&
nir_instr_as_intrinsic(s.def->parent_instr)->intrinsic ==
@ -40,7 +39,6 @@ src_is_invocation_id(const nir_src *src)
static bool
src_is_local_invocation_index(const nir_src *src)
{
assert(src->is_ssa);
nir_ssa_scalar s = nir_ssa_scalar_resolved(src->ssa, 0);
return s.def->parent_instr->type == nir_instr_type_intrinsic &&
nir_instr_as_intrinsic(s.def->parent_instr)->intrinsic ==

View file

@ -58,9 +58,6 @@ instr_each_src_and_dest_is_ssa(const nir_instr *instr)
static bool
instr_can_rewrite(const nir_instr *instr)
{
/* We only handle SSA. */
assert(instr_each_src_and_dest_is_ssa(instr));
switch (instr->type) {
case nir_instr_type_alu:
case nir_instr_type_deref:
@ -88,7 +85,6 @@ instr_can_rewrite(const nir_instr *instr)
static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
assert(src->is_ssa);
hash = HASH(hash, src->ssa);
return hash;
}

View file

@ -42,7 +42,6 @@ nir_legacy_float_mod_folds(nir_alu_instr *mod)
static nir_legacy_alu_src
chase_alu_src_helper(const nir_src *src)
{
assert(src->is_ssa && "registers lowered to intrinsics");
nir_intrinsic_instr *load = nir_load_reg_for_def(src->ssa);
if (load) {
@ -92,7 +91,6 @@ chase_source_mod(nir_ssa_def **ssa, nir_op op, uint8_t *swizzle)
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
swizzle[i] = alu->src[0].swizzle[swizzle[i]];
assert(alu->src[0].src.is_ssa && "registers lowered to intrinsics");
*ssa = alu->src[0].src.ssa;
return true;
}
@ -100,8 +98,6 @@ chase_source_mod(nir_ssa_def **ssa, nir_op op, uint8_t *swizzle)
nir_legacy_alu_src
nir_legacy_chase_alu_src(const nir_alu_src *src, bool fuse_fabs)
{
assert(src->src.is_ssa && "registers lowered to intrinsics");
if (src->src.ssa->parent_instr->type == nir_instr_type_alu) {
nir_legacy_alu_src out = {
.src.is_ssa = true,
@ -129,7 +125,6 @@ nir_legacy_chase_alu_src(const nir_alu_src *src, bool fuse_fabs)
static nir_legacy_alu_dest
chase_alu_dest_helper(nir_dest *dest)
{
assert(dest->is_ssa && "registers lowered to intrinsics");
nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
if (store) {
@ -227,7 +222,6 @@ chase_fsat(nir_ssa_def **def)
nir_legacy_alu_dest
nir_legacy_chase_alu_dest(nir_dest *dest)
{
assert(dest->is_ssa && "registers lowered to intrinsics");
nir_ssa_def *def = &dest->ssa;
/* Try SSA fsat. No users support 64-bit modifiers. */
@ -320,7 +314,6 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
nir_legacy_alu_dest dest = nir_legacy_chase_alu_dest(&alu->dest.dest);
if (dest.fsat) {
assert(dest.dest.is_ssa && "not fully chased");
nir_intrinsic_instr *store = nir_store_reg_for_def(dest.dest.ssa);
if (store) {

View file

@ -75,8 +75,6 @@ lower_large_src(nir_src *src, void *s)
{
lower_state *state = s;
assert(src->is_ssa);
nir_instr *parent = src->ssa->parent_instr;
/* No need to visit instructions we've already visited.. this also

View file

@ -633,7 +633,6 @@ consume_dual_stores(nir_builder *b, nir_instr *instr, void *data)
int rt = color_index_for_location(sem.location);
assert(rt >= 0 && rt < 8 && "bounds for dual-source blending");
assert(store->src[0].is_ssa && "must be SSA");
outputs[rt] = store->src[0].ssa;
nir_instr_remove(instr);

View file

@ -79,7 +79,6 @@ set_const_initialiser(nir_deref_instr **p, nir_constant *top_level_init,
/* Now that we have selected the correct nir_constant we copy the constant
* values to it.
*/
assert(const_src->is_ssa);
nir_instr *src_instr = const_src->ssa->parent_instr;
assert(src_instr->type == nir_instr_type_load_const);
nir_load_const_instr* load = nir_instr_as_load_const(src_instr);

View file

@ -1144,7 +1144,7 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
/* This works on subgroup ops with a single 64-bit source which can be
* trivially lowered by doing the exact same op on both halves.
*/
assert(intrin->src[0].is_ssa && intrin->src[0].ssa->bit_size == 64);
assert(nir_src_bit_size(intrin->src[0]) == 64);
nir_ssa_def *split_src0[2] = {
nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa),
nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa),
@ -1161,7 +1161,7 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
/* Other sources must be less than 64 bits and get copied directly */
for (unsigned j = 1; j < info->num_srcs; j++) {
assert(intrin->src[j].is_ssa && intrin->src[j].ssa->bit_size < 64);
assert(nir_src_bit_size(intrin->src[j]) < 64);
split->src[j] = nir_src_for_ssa(intrin->src[j].ssa);
}

View file

@ -533,7 +533,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
}
/* None of the supported APIs allow interpolation on 64-bit things */
assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
assert(nir_dest_bit_size(intrin->dest) <= 32);
nir_intrinsic_op bary_op;
switch (intrin->intrinsic) {

View file

@ -81,7 +81,6 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
const uint32_t whole_align = nir_intrinsic_align(intrin);
nir_src *offset_src = nir_get_io_offset_src(intrin);
const bool offset_is_const = nir_src_is_const(*offset_src);
assert(offset_src->is_ssa);
nir_ssa_def *offset = offset_src->ssa;
nir_mem_access_size_align requested =
@ -241,7 +240,6 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
const uint32_t whole_align = nir_intrinsic_align(intrin);
nir_src *offset_src = nir_get_io_offset_src(intrin);
const bool offset_is_const = nir_src_is_const(*offset_src);
assert(offset_src->is_ssa);
nir_ssa_def *offset = offset_src->ssa;
nir_component_mask_t writemask = nir_intrinsic_write_mask(intrin);

View file

@ -56,7 +56,6 @@ nu_handle_init(struct nu_handle *h, nir_src *src)
if (nir_src_is_const(*src))
return false;
assert(src->is_ssa);
h->handle = src->ssa;
h->parent_deref = NULL;

View file

@ -101,7 +101,6 @@ static bool
src_is_in_bitset(nir_src *src, void *_set)
{
struct sized_bitset *set = _set;
assert(src->is_ssa);
/* Any SSA values which were added after we generated liveness information
* are things generated by this pass and, while most of it is arithmetic
@ -371,7 +370,6 @@ static nir_ssa_def *
get_phi_builder_def_for_src(nir_src *src, struct pbv_array *pbv_arr,
nir_block *block)
{
assert(src->is_ssa);
struct nir_phi_builder_value *pbv =
get_phi_builder_value_for_def(src->ssa, pbv_arr);
@ -423,7 +421,6 @@ add_src_to_call_live_bitset(nir_src *src, void *state)
{
BITSET_WORD *call_live = state;
assert(src->is_ssa);
BITSET_SET(call_live, src->ssa->index);
return true;
}

View file

@ -124,7 +124,6 @@ move_vec_src_uses_to_dest_block(nir_block *block)
if (use->parent_instr->type != nir_instr_type_alu)
continue;
assert(use->is_ssa);
nir_alu_instr *use_alu = nir_instr_as_alu(use->parent_instr);

View file

@ -717,7 +717,6 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
}
*value = entry->src;
assert(value->is_ssa);
const struct glsl_type *type = entry->dst.instr->type;
unsigned num_components = glsl_get_vector_elements(type);

View file

@ -165,7 +165,6 @@ gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
static bool
is_src_scalarizable(nir_src *src)
{
assert(src->is_ssa);
nir_instr *src_instr = src->ssa->parent_instr;
switch (src_instr->type) {
@ -428,7 +427,6 @@ gcm_schedule_early_src(nir_src *src, void *void_state)
struct gcm_state *state = void_state;
nir_instr *instr = state->instr;
assert(src->is_ssa);
gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

View file

@ -98,7 +98,6 @@ can_move_src(nir_src *src, void *state)
{
opt_preamble_ctx *ctx = state;
assert(src->is_ssa);
return ctx->states[src->ssa->index].can_move;
}

View file

@ -42,7 +42,6 @@
static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
assert(src->is_ssa);
void *hash_data = nir_src_is_const(*src) ? NULL : src->ssa;
return HASH(hash, hash_data);
@ -84,8 +83,6 @@ hash_instr(const void *data)
static bool
srcs_equal(const nir_src *src1, const nir_src *src2)
{
assert(src1->is_ssa);
assert(src2->is_ssa);
return src1->ssa == src2->ssa ||
(nir_src_is_const(*src1) && nir_src_is_const(*src2));

View file

@ -384,14 +384,12 @@ static void print_src(const nir_src *src, print_state *state, nir_alu_type src_t
static void
print_src(const nir_src *src, print_state *state, nir_alu_type src_type)
{
assert(src->is_ssa);
print_ssa_use(src->ssa, state, src_type);
}
static void
print_dest(nir_dest *dest, print_state *state)
{
assert(dest->is_ssa);
print_ssa_def(&dest->ssa, state);
}

View file

@ -470,7 +470,6 @@ union packed_src {
static void
write_src_full(write_ctx *ctx, const nir_src *src, union packed_src header)
{
assert(src->is_ssa);
header.any.object_idx = write_lookup_object(ctx, src->ssa);
blob_write_uint32(ctx->blob, header.u32);
}
@ -616,7 +615,6 @@ write_dest(write_ctx *ctx, const nir_dest *dst, union packed_instr header,
union packed_dest dest;
dest.u8 = 0;
assert(dst->is_ssa);
dest.num_components =
encode_num_components_in_3bits(dst->ssa.num_components);
dest.bit_size = encode_bit_size_3bits(dst->ssa.bit_size);
@ -665,7 +663,6 @@ write_dest(write_ctx *ctx, const nir_dest *dst, union packed_instr header,
if (dest.num_components == NUM_COMPONENTS_IS_SEPARATE_7)
blob_write_uint32(ctx->blob, dst->ssa.num_components);
assert(dst->is_ssa);
write_add_object(ctx, &dst->ssa);
}

View file

@ -123,7 +123,6 @@ def_is_invariant(nir_ssa_def *def, nir_loop *loop)
static bool
src_is_invariant(nir_src *src, void *state)
{
assert(src->is_ssa);
return def_is_invariant(src->ssa, (nir_loop *)state);
}

View file

@ -116,7 +116,6 @@ trivialize_src(nir_src *src, void *state_)
{
struct trivialize_src_state *state = state_;
assert(src->is_ssa && "register intrinsics only");
nir_instr *parent = src->ssa->parent_instr;
if (parent->type != nir_instr_type_intrinsic)
return true;

View file

@ -161,7 +161,6 @@ validate_src(nir_src *src, validate_state *state,
else
validate_assert(state, src->parent_if == state->if_stmt);
validate_assert(state, src->is_ssa);
validate_ssa_src(src, state, bit_sizes, num_components);
}
@ -193,7 +192,6 @@ validate_ssa_def(nir_ssa_def *def, validate_state *state)
list_validate(&def->uses);
nir_foreach_use_including_if(src, def) {
validate_assert(state, src->is_ssa);
validate_assert(state, src->ssa == def);
bool already_seen = false;
@ -207,7 +205,6 @@ static void
validate_dest(nir_dest *dest, validate_state *state,
unsigned bit_sizes, unsigned num_components)
{
validate_assert(state, dest->is_ssa);
if (bit_sizes)
validate_assert(state, dest->ssa.bit_size & bit_sizes);
if (num_components)

View file

@ -203,7 +203,6 @@ ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
assert(dst->is_ssa);
struct ir3_instruction **value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
compile_assert(ctx, !ctx->last_dst);
@ -216,7 +215,6 @@ ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
struct ir3_instruction *const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
assert(src->is_ssa);
struct hash_entry *entry;
entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
compile_assert(ctx, entry);
@ -253,7 +251,6 @@ ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
}
}
assert(dst->is_ssa);
ctx->last_dst = NULL;
ctx->last_dst_n = 0;
}

View file

@ -124,8 +124,6 @@ check_precondition_block(precond_state *state, nir_block *block)
static bool
move_src(nir_src *src, void *state)
{
/* At this point we shouldn't have any non-ssa src: */
assert(src->is_ssa);
move_instruction_to_start_block(state, src->ssa->parent_instr);
return true;
}

View file

@ -141,8 +141,6 @@ static inline bool is_sysval(nir_instr *instr)
static inline unsigned
src_index(nir_function_impl *impl, nir_src *src)
{
assert(src->is_ssa);
nir_intrinsic_instr *load = nir_load_reg_for_def(src->ssa);
if (load) {
@ -161,8 +159,6 @@ src_index(nir_function_impl *impl, nir_src *src)
static inline unsigned
dest_index(nir_function_impl *impl, nir_dest *dest)
{
assert(dest->is_ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
if (store) {

View file

@ -157,7 +157,6 @@ etna_ra_assign(struct etna_compile *c, nir_shader *shader)
case nir_op_flog2:
case nir_op_fsin:
case nir_op_fcos:
assert(dest->is_ssa);
comp = REG_CLASS_VIRT_VEC2T;
break;
default:

View file

@ -276,7 +276,6 @@ set_legacy_index(struct ir2_context *ctx, nir_legacy_dest dst,
if (dst.is_ssa) {
ctx->ssa_map[dst.ssa->index] = instr->idx;
} else {
assert(instr->is_ssa);
reg = &ctx->reg[dst.reg.handle->index];
instr->is_ssa = false;

View file

@ -95,7 +95,6 @@ static void register_node_reg(gpir_block *block, gpir_node *node, int index)
*/
static void register_node(gpir_block *block, gpir_node *node, nir_dest *dest)
{
assert(dest->is_ssa);
register_node_ssa(block, node, &dest->ssa);
}
@ -104,7 +103,6 @@ static gpir_node *gpir_node_find(gpir_block *block, nir_src *src,
{
gpir_reg *reg = NULL;
gpir_node *pred = NULL;
assert(src->is_ssa);
if (src->ssa->num_components > 1) {
for (int i = 0; i < GPIR_VECTOR_SSA_NUM; i++) {
if (block->comp->vector_ssa[i].ssa == src->ssa->index) {
@ -210,7 +208,6 @@ static gpir_node *gpir_create_load(gpir_block *block, nir_dest *dest,
static bool gpir_create_vector_load(gpir_block *block, nir_dest *dest, int index)
{
assert(dest->is_ssa);
assert(index < GPIR_VECTOR_SSA_NUM);
block->comp->vector_ssa[index].ssa = dest->ssa.index;

View file

@ -391,7 +391,6 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
return false;
}
assert(instr->src->is_ssa);
if (!block->comp->uses_discard) {
node = block->comp->var_nodes[instr->src->ssa->index];
assert(node);
@ -538,7 +537,6 @@ static bool ppir_emit_tex(ppir_block *block, nir_instr *ni)
FALLTHROUGH;
case nir_tex_src_coord: {
nir_src *ns = &instr->src[i].src;
assert(ns->is_ssa);
ppir_node *child = block->comp->var_nodes[ns->ssa->index];
if (child->op == ppir_op_load_varying) {
/* If the successor is load_texture, promote it to load_coords */

View file

@ -335,8 +335,6 @@ private:
return nullptr;
}
assert(new_bufid->is_ssa);
nir_intrinsic_set_base(intr, new_base);
nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(new_bufid->ssa));
return &intr->dest.ssa;

View file

@ -1053,7 +1053,6 @@ double2vec2(nir_src *src, UNUSED void *state)
if (nir_src_bit_size(*src) != 64)
return true;
assert(src->is_ssa);
src->ssa->bit_size = 32;
src->ssa->num_components *= 2;
return true;

View file

@ -196,7 +196,6 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
(result.file == QFILE_TEMP &&
last_inst && last_inst == c->defs[result.index]));
assert(dest->is_ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
if (store == NULL) {
assert(chan < dest->ssa.num_components);

View file

@ -4002,7 +4002,6 @@ add_rebuild_src(nir_src *src, void *state)
{
struct rebuild_resource *res = (struct rebuild_resource *) state;
assert(src->is_ssa);
for (nir_ssa_def *def : res->array) {
if (def == src->ssa)
return true;

View file

@ -202,7 +202,6 @@ brw_nir_adjust_task_payload_offsets_instr(struct nir_builder *b,
* TODO(mesh): Figure out how to handle 8-bit, 16-bit.
*/
assert(offset_src->is_ssa);
nir_ssa_def *offset = nir_ishr_imm(b, offset_src->ssa, 2);
nir_instr_rewrite_src(&intrin->instr, offset_src, nir_src_for_ssa(offset));
@ -1186,7 +1185,6 @@ brw_nir_adjust_offset(nir_builder *b, nir_intrinsic_instr *intrin, uint32_t pitc
nir_src *index_src = nir_get_io_arrayed_index_src(intrin);
nir_src *offset_src = nir_get_io_offset_src(intrin);
assert(index_src->is_ssa);
b->cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *offset =
nir_iadd(b,

View file

@ -136,7 +136,6 @@ store_resume_addr(nir_builder *b, nir_intrinsic_instr *call)
/* By the time we get here, any remaining shader/function memory
* pointers have been lowered to SSA values.
*/
assert(nir_get_shader_call_payload_src(call)->is_ssa);
nir_ssa_def *payload_addr =
nir_get_shader_call_payload_src(call)->ssa;
brw_nir_rt_store_scratch(b, offset, BRW_BTD_STACK_ALIGN,

View file

@ -343,7 +343,7 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
continue;
nir_src *sampler_src = &tex->src[sampler_src_idx].src;
assert(sampler_src->is_ssa && sampler_src->ssa->parent_instr->type == nir_instr_type_deref);
assert(sampler_src->ssa->parent_instr->type == nir_instr_type_deref);
nir_variable *sampler = nir_deref_instr_get_variable(
nir_instr_as_deref(sampler_src->ssa->parent_instr));

View file

@ -2312,9 +2312,7 @@ clear_pass_flags(nir_function_impl *impl)
static bool
add_dest_to_worklist(nir_dest *dest, void *state)
{
assert(dest->is_ssa);
nir_foreach_use_including_if(src, &dest->ssa) {
assert(src->is_ssa);
if (src->is_if) {
nir_if *nif = src->parent_if;
nir_foreach_block_in_cf_node(block, &nif->cf_node) {

View file

@ -63,7 +63,6 @@ add_instr_and_srcs_to_set(struct set *instr_set, nir_instr *instr);
static bool
add_srcs_to_set(nir_src *src, void *state)
{
assert(src->is_ssa);
add_instr_and_srcs_to_set(state, src->ssa->parent_instr);
return true;
}
@ -309,11 +308,11 @@ remove_tess_level_accesses(nir_builder *b, nir_instr *instr, void *_data)
return false;
if (intr->intrinsic == nir_intrinsic_store_output) {
assert(intr->src[0].is_ssa && intr->src[0].ssa->num_components == 1);
assert(nir_src_num_components(intr->src[0]) == 1);
nir_instr_remove(instr);
} else {
b->cursor = nir_after_instr(instr);
assert(intr->dest.is_ssa && intr->dest.ssa.num_components == 1);
assert(nir_dest_num_components(intr->dest) == 1);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_ssa_undef(b, 1, intr->dest.ssa.bit_size));
}
return true;

View file

@ -2161,7 +2161,6 @@ static void
store_dest_value(struct ntd_context *ctx, nir_dest *dest, unsigned chan,
const struct dxil_value *value)
{
assert(dest->is_ssa);
assert(value);
store_ssa_def(ctx, &dest->ssa, chan, value);
}
@ -2201,7 +2200,6 @@ static const struct dxil_value *
get_src(struct ntd_context *ctx, nir_src *src, unsigned chan,
nir_alu_type type)
{
assert(src->is_ssa);
const struct dxil_value *value = get_src_ssa(ctx, src->ssa, chan);
const int bit_size = nir_src_bit_size(*src);

View file

@ -690,7 +690,6 @@ Converter::convert(nir_alu_dest *dest)
Converter::LValues&
Converter::convert(nir_dest *dest)
{
assert(dest->is_ssa);
return convert(&dest->ssa);
}
@ -716,7 +715,6 @@ Converter::getSrc(nir_alu_src *src, uint8_t component)
Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
assert(src->is_ssa);
return getSrc(src->ssa, idx);
}

View file

@ -954,7 +954,6 @@ bi_src_index(nir_src *src)
if (nir_src_is_const(*src) && nir_src_bit_size(*src) <= 32) {
return bi_imm_u32(nir_src_as_uint(*src));
} else {
assert(src->is_ssa);
return bi_get_index(src->ssa->index);
}
}
@ -962,7 +961,6 @@ bi_src_index(nir_src *src)
static inline bi_index
bi_dest_index(nir_dest *dst)
{
assert(dst->is_ssa);
return bi_get_index(dst->ssa.index);
}

View file

@ -297,7 +297,6 @@ mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset,
void
mir_set_ubo_offset(midgard_instruction *ins, nir_src *src, unsigned bias)
{
assert(src->is_ssa);
struct mir_address match = mir_match_offset(src->ssa, false, false);
if (match.B.def) {

View file

@ -51,7 +51,6 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
b, buf,
nir_u2u64(b, nir_iadd_imm(b, nir_imul_imm(b, index, stride), offset)));
assert(intr->src[0].is_ssa && "must lower XFB before lowering SSA");
nir_ssa_def *src = intr->src[0].ssa;
nir_ssa_def *value =
nir_channels(b, src, BITFIELD_MASK(num_components) << start_component);