Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
nir: Drop unused name from nir_ssa_dest_init
Since 624e799cc3 ("nir: Drop nir_ssa_def::name and nir_register::name"), SSA
defs don't have names, making the name argument unused. Drop it from the
signature and fix the call sites. This was done with the help of the following
Coccinelle semantic patch:
@@
expression A, B, C, D, E;
@@
-nir_ssa_dest_init(A, B, C, D, E);
+nir_ssa_dest_init(A, B, C, D);
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23078>
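For illustration, the transformation this patch applies at each call site looks like the following minimal before/after sketch, distilled from the hunks below (the surrounding instruction setup is elided):

   /* Before: callers passed a name argument that was already ignored. */
   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);

   /* After: the name parameter is dropped from the declaration and the calls. */
   void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                          unsigned num_components, unsigned bit_size);

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);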
This commit is contained in:
parent 7adf6c75ef
commit 01e9ee79f7
118 changed files with 283 additions and 323 deletions
@@ -116,8 +116,9 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
   new_intrin->num_components = intrin->num_components;

   if (op != nir_intrinsic_store_global_amd)
-     nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest, intrin->dest.ssa.num_components,
-                       intrin->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest,
+                       intrin->dest.ssa.num_components,
+                       intrin->dest.ssa.bit_size);

   unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_src; i++)

@@ -419,7 +419,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
   nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
   new_tex->src[0].src_type = tex->src[i].src_type;
   nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
-                    nir_tex_instr_dest_size(new_tex), 32, NULL);
+                    nir_tex_instr_dest_size(new_tex), 32);
   nir_builder_instr_insert(b, &new_tex->instr);
   desc = &new_tex->dest.ssa;
   break;

@@ -438,7 +438,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
   nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
   new_tex->src[0].src_type = tex->src[i].src_type;
   nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
-                    nir_tex_instr_dest_size(new_tex), 32, NULL);
+                    nir_tex_instr_dest_size(new_tex), 32);
   nir_builder_instr_insert(b, &new_tex->instr);
   sampler_desc = &new_tex->dest.ssa;
   break;
@@ -293,7 +293,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
   nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
   new_tex->src[0].src_type = tex->src[i].src_type;
   nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
-                    nir_tex_instr_dest_size(new_tex), 32, NULL);
+                    nir_tex_instr_dest_size(new_tex), 32);
   nir_builder_instr_insert(b, &new_tex->instr);
   desc = &new_tex->dest.ssa;
   break;

@@ -43,7 +43,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
   nir_intrinsic_instr *clone_intr = nir_instr_as_intrinsic(clone);

   /* Shrink the load to count contiguous components */
-  nir_ssa_dest_init(clone, &clone_intr->dest, count, bit_size, NULL);
+  nir_ssa_dest_init(clone, &clone_intr->dest, count, bit_size);
   nir_ssa_def *clone_vec = &clone_intr->dest.ssa;
   clone_intr->num_components = count;

@@ -404,7 +404,7 @@ bias_for_tex(nir_builder *b, nir_tex_instr *tex)
   query->op = nir_texop_lod_bias_agx;
   query->dest_type = nir_type_float16;

-  nir_ssa_dest_init(instr, &query->dest, 1, 16, NULL);
+  nir_ssa_dest_init(instr, &query->dest, 1, 16);
   return &query->dest.ssa;
}

@@ -54,7 +54,7 @@ build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt,

   tex->coord_components = 2;
   tex->texture_index = rt;
-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);

   return nir_trim_vector(b, &tex->dest.ssa, nr);
@@ -153,8 +153,8 @@ lower_load_bitsize(nir_builder *b,
      }
   }

-  nir_ssa_dest_init(&new_intr->instr, &new_intr->dest,
-                    1, bit_size, NULL);
+  nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, 1,
+                    bit_size);
   dest_components[component] = &new_intr->dest.ssa;

   nir_builder_instr_insert(b, &new_intr->instr);

@@ -44,7 +44,7 @@ rewrite_offset(nir_builder *b,
   nir_intrinsic_instr *size =
      nir_intrinsic_instr_create(b->shader, buffer_size_op);
   size->src[0] = nir_src_for_ssa(nir_imm_int(b, buffer_idx));
-  nir_ssa_dest_init(&size->instr, &size->dest, 1, 32, NULL);
+  nir_ssa_dest_init(&size->instr, &size->dest, 1, 32);
   nir_builder_instr_insert(b, &size->instr);

   /* Compute the maximum offset being accessed and if it is

@@ -204,8 +204,7 @@ lower_image(struct v3d_compile *c,
   size_inst->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
   nir_intrinsic_set_image_array(size_inst, is_array);
   size_inst->num_components = num_coords;
-  nir_ssa_dest_init(&size_inst->instr, &size_inst->dest,
-                    num_coords, 32, NULL);
+  nir_ssa_dest_init(&size_inst->instr, &size_inst->dest, num_coords, 32);
   nir_ssa_def *size = &size_inst->dest.ssa;
   nir_builder_instr_insert(b, &size_inst->instr);

@@ -66,7 +66,7 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
      nir_intrinsic_instr_create(b->shader, instr->intrinsic);
   chan_instr->num_components = 1;
   nir_ssa_dest_init(&chan_instr->instr, &chan_instr->dest, 1,
-                    instr->dest.ssa.bit_size, NULL);
+                    instr->dest.ssa.bit_size);

   chan_instr->src[0] = nir_src_for_ssa(chan_offset);
@@ -2015,7 +2015,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
   tex->dest_type = nir_type_uint32;
   tex->is_array = false;
   tex->coord_components = 1;
-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "texel buffer result");
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(&b, &tex->instr);

   uint32_t swiz[4];

@@ -3369,7 +3369,7 @@ build_nir_tex_op_read(struct nir_builder *b,
   tex->is_array = glsl_sampler_type_is_array(sampler_type);
   tex->coord_components = tex_pos->num_components;

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   return &tex->dest.ssa;
}

@@ -3397,7 +3397,7 @@ build_nir_tex_op_ms_fetch_sample(struct nir_builder *b,
   tex->is_array = false;
   tex->coord_components = tex_pos->num_components;

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   return &tex->dest.ssa;
}
@@ -1306,10 +1306,10 @@ nir_visitor::visit(ir_call *ir)
      assert(ir->return_deref);
      if (ir->return_deref->type->is_integer_64()) {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
-                          ir->return_deref->type->vector_elements, 64, NULL);
+                          ir->return_deref->type->vector_elements, 64);
      } else {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
-                          ir->return_deref->type->vector_elements, 32, NULL);
+                          ir->return_deref->type->vector_elements, 32);
      }
      nir_builder_instr_insert(&b, &instr->instr);
      break;

@@ -1334,7 +1334,7 @@ nir_visitor::visit(ir_call *ir)

      /* Set the intrinsic destination. */
      if (ir->return_deref) {
-        nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+        nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32);
      }

      /* Set the intrinsic parameters. */

@@ -1390,8 +1390,7 @@ nir_visitor::visit(ir_call *ir)
      } else
         num_components = ir->return_deref->type->vector_elements;

-     nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       num_components, 32, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, num_components, 32);
      }

      if (op == nir_intrinsic_image_deref_size) {

@@ -1532,7 +1531,7 @@ nir_visitor::visit(ir_call *ir)
      break;
   }
   case nir_intrinsic_shader_clock:
-     nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32);
      nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
      nir_builder_instr_insert(&b, &instr->instr);
      break;

@@ -1583,8 +1582,8 @@ nir_visitor::visit(ir_call *ir)

      /* Setup destination register */
      unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
-     nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       type->vector_elements, bit_size, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, type->vector_elements,
+                       bit_size);

      nir_builder_instr_insert(&b, &instr->instr);

@@ -1649,8 +1648,7 @@ nir_visitor::visit(ir_call *ir)
      assert(ir->return_deref);
      unsigned bit_size = glsl_get_bit_size(ir->return_deref->type);
      nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       ir->return_deref->type->vector_elements,
-                       bit_size, NULL);
+                       ir->return_deref->type->vector_elements, bit_size);
      nir_intrinsic_set_atomic_op(instr, atomic_op);
      nir_builder_instr_insert(&b, &instr->instr);
      break;

@@ -1660,7 +1658,7 @@ nir_visitor::visit(ir_call *ir)
      FALLTHROUGH;
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_all: {
-     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);

      ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
      instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

@@ -1671,7 +1669,7 @@ nir_visitor::visit(ir_call *ir)

   case nir_intrinsic_ballot: {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       ir->return_deref->type->vector_elements, 64, NULL);
+                       ir->return_deref->type->vector_elements, 64);
      instr->num_components = ir->return_deref->type->vector_elements;

      ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();

@@ -1682,7 +1680,7 @@ nir_visitor::visit(ir_call *ir)
   }
   case nir_intrinsic_read_invocation: {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       ir->return_deref->type->vector_elements, 32, NULL);
+                       ir->return_deref->type->vector_elements, 32);
      instr->num_components = ir->return_deref->type->vector_elements;

      ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();

@@ -1696,7 +1694,7 @@ nir_visitor::visit(ir_call *ir)
   }
   case nir_intrinsic_read_first_invocation: {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
-                       ir->return_deref->type->vector_elements, 32, NULL);
+                       ir->return_deref->type->vector_elements, 32);
      instr->num_components = ir->return_deref->type->vector_elements;

      ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();

@@ -1706,12 +1704,12 @@ nir_visitor::visit(ir_call *ir)
      break;
   }
   case nir_intrinsic_is_helper_invocation: {
-     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);
      nir_builder_instr_insert(&b, &instr->instr);
      break;
   }
   case nir_intrinsic_is_sparse_texels_resident: {
-     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+     nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);

      ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
      instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

@@ -1880,7 +1878,7 @@ nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
   nir_dest *dest = get_instr_dest(instr);

   if (dest)
-     nir_ssa_dest_init(instr, dest, num_components, bit_size, NULL);
+     nir_ssa_dest_init(instr, dest, num_components, bit_size);

   nir_builder_instr_insert(&b, instr);
@@ -1799,8 +1799,7 @@ nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
 /* note: does *not* take ownership of 'name' */
 void
 nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
-                  unsigned num_components, unsigned bit_size,
-                  const char *name)
+                  unsigned num_components, unsigned bit_size)
 {
    dest->is_ssa = true;
    nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size);

@@ -4450,8 +4450,7 @@ void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                             nir_dest new_dest);

 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
-                       unsigned num_components, unsigned bit_size,
-                       const char *name);
+                       unsigned num_components, unsigned bit_size);
 void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                       unsigned num_components, unsigned bit_size);
 static inline void

@@ -4461,7 +4460,7 @@ nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
 {
    assert(glsl_type_is_vector_or_scalar(type));
    nir_ssa_dest_init(instr, dest, glsl_get_components(type),
-                     glsl_get_bit_size(type), name);
+                     glsl_get_bit_size(type));
 }
 void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa);
 void nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src);
@@ -116,7 +116,7 @@ nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
-                    bit_size, NULL);
+                    bit_size);
   instr->dest.write_mask = nir_component_mask(num_components);

   nir_builder_instr_insert(build, &instr->instr);

@@ -310,10 +310,8 @@ nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
   }
   assert(src_idx == num_srcs);

-  nir_ssa_dest_init(&tex->instr, &tex->dest,
-                    nir_tex_instr_dest_size(tex),
-                    nir_alu_type_get_type_size(tex->dest_type),
-                    NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+                    nir_alu_type_get_type_size(tex->dest_type));
   nir_builder_instr_insert(build, &tex->instr);

   return &tex->dest.ssa;

@@ -337,7 +335,7 @@ nir_vec_scalars(nir_builder *build, nir_ssa_scalar *comp, unsigned num_component
    * can't re-guess the num_components when num_components == 1 (nir_op_mov).
    */
   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
-                    comp[0].def->bit_size, NULL);
+                    comp[0].def->bit_size);
   instr->dest.write_mask = nir_component_mask(num_components);

   nir_builder_instr_insert(build, &instr->instr);

@@ -394,8 +392,7 @@ nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
   load->num_components = num_components;
   load->const_index[0] = index;

-  nir_ssa_dest_init(&load->instr, &load->dest,
-                    num_components, bit_size, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

@@ -482,8 +479,8 @@ nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
-  nir_ssa_dest_init(&phi->instr, &phi->dest,
-                    then_def->num_components, then_def->bit_size, NULL);
+  nir_ssa_dest_init(&phi->instr, &phi->dest, then_def->num_components,
+                    then_def->bit_size);

   nir_builder_instr_insert(build, &phi->instr);
@@ -495,7 +495,7 @@ nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
-                    nir_src_bit_size(src.src), NULL);
+                    nir_src_bit_size(src.src));
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;

@@ -1220,7 +1220,7 @@ nir_build_deref_var(nir_builder *build, nir_variable *var)
   deref->var = var;

   nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
-                    nir_get_ptr_bitsize(build->shader), NULL);
+                    nir_get_ptr_bitsize(build->shader));

   nir_builder_instr_insert(build, &deref->instr);

@@ -1247,7 +1247,7 @@ nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
-                    parent->dest.ssa.bit_size, NULL);
+                    parent->dest.ssa.bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1285,7 +1285,7 @@ nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
-                    parent->dest.ssa.bit_size, NULL);
+                    parent->dest.ssa.bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1307,7 +1307,7 @@ nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
-                    parent->dest.ssa.bit_size, NULL);
+                    parent->dest.ssa.bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1330,7 +1330,7 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
-                    parent->dest.ssa.bit_size, NULL);
+                    parent->dest.ssa.bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1350,8 +1350,8 @@ nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
   deref->parent = nir_src_for_ssa(parent);
   deref->cast.ptr_stride = ptr_stride;

-  nir_ssa_dest_init(&deref->instr, &deref->dest,
-                    parent->num_components, parent->bit_size, NULL);
+  nir_ssa_dest_init(&deref->instr, &deref->dest, parent->num_components,
+                    parent->bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1374,7 +1374,7 @@ nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
-                    parent->dest.ssa.bit_size, NULL);
+                    parent->dest.ssa.bit_size);

   nir_builder_instr_insert(build, &deref->instr);

@@ -1589,8 +1589,7 @@ nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
   load->num_components = num_components;
   load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(load, align, 0);
-  nir_ssa_dest_init(&load->instr, &load->dest,
-                    num_components, bit_size, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

@@ -1621,8 +1620,7 @@ nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
   load->num_components = num_components;
   load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(load, align, 0);
-  nir_ssa_dest_init(&load->instr, &load->dest,
-                    num_components, bit_size, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

@@ -1749,7 +1747,7 @@ nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
{
   unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
-  nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
+  nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
@@ -120,9 +120,9 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)})
   % endif
   % if opcode.has_dest:
   % if opcode.dest_components == 0:
-  nir_ssa_dest_init(&intrin->instr, &intrin->dest, intrin->num_components, ${get_intrinsic_bitsize(opcode)}, NULL);
+  nir_ssa_dest_init(&intrin->instr, &intrin->dest, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
   % else:
-  nir_ssa_dest_init(&intrin->instr, &intrin->dest, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)}, NULL);
+  nir_ssa_dest_init(&intrin->instr, &intrin->dest, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
   % endif
   % endif
   % for i in range(opcode.num_srcs):
@@ -375,8 +375,8 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
   txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[idx].src_type = nir_tex_src_lod;

-  nir_ssa_dest_init(&txs->instr, &txs->dest,
-                    nir_tex_instr_dest_size(txs), 32, NULL);
+  nir_ssa_dest_init(&txs->instr, &txs->dest, nir_tex_instr_dest_size(txs),
+                    32);
   nir_builder_instr_insert(b, &txs->instr);

   return &txs->dest.ssa;

@@ -427,7 +427,7 @@ nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
      }
   }

-  nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
+  nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32);
   nir_builder_instr_insert(b, &tql->instr);

   /* The LOD is the y component of the result */

@@ -257,7 +257,7 @@ __clone_dst(clone_state *state, nir_instr *ninstr,
   ndst->is_ssa = dst->is_ssa;
   if (dst->is_ssa) {
      nir_ssa_dest_init(ninstr, ndst, dst->ssa.num_components,
-                       dst->ssa.bit_size, NULL);
+                       dst->ssa.bit_size);
      if (likely(state->remap_table))
         add_remap(state, &ndst->ssa, &dst->ssa);
   } else {
@@ -172,8 +172,8 @@ get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
   tex->src[0].src_type = nir_tex_src_texture_deref;
   tex->src[0].src = nir_src_for_ssa(&texture->dest.ssa);

-  nir_ssa_dest_init(&tex->instr, &tex->dest,
-                    nir_tex_instr_dest_size(tex), 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+                    32);
   nir_builder_instr_insert(b, &tex->instr);

   state->image_size = nir_i2f32(b, &tex->dest.ssa);

@@ -273,7 +273,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,

   nir_ssa_dest_init(&tex->instr, &tex->dest,
                     old_tex->dest.ssa.num_components,
-                    nir_dest_bit_size(old_tex->dest), NULL);
+                    nir_dest_bit_size(old_tex->dest));
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;

@@ -788,9 +788,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
   }

   nir_ssa_dest_init(&new_deref->instr, &new_deref->dest,
-                    deref->dest.ssa.num_components,
-                    deref->dest.ssa.bit_size,
-                    NULL);
+                    deref->dest.ssa.num_components, deref->dest.ssa.bit_size);
   nir_builder_instr_insert(b, &new_deref->instr);

   return new_deref;
@@ -402,7 +402,7 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
                                                 nir_parallel_copy_entry);
      nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components,
-                       phi->dest.ssa.bit_size, NULL);
+                       phi->dest.ssa.bit_size);
      entry->dest.ssa.divergent = nir_src_is_divergent(src->src);
      exec_list_push_tail(&pcopy->entries, &entry->node);

@@ -416,8 +416,7 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
-                       phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
-                       NULL);
+                       phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
      entry->dest.ssa.divergent = phi->dest.ssa.divergent;
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

@@ -91,8 +91,7 @@ static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                      unsigned bit_size)
{
-  nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
-                    bit_size, NULL);
+  nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components, bit_size);
   alu->dest.write_mask = (1 << num_components) - 1;
}
@@ -181,8 +181,7 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
   }

   nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
-                    instr->dest.ssa.num_components,
-                    instr->dest.ssa.bit_size, NULL);
+                    instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);

@@ -322,10 +322,10 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
      nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
   }

-  nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
-                    num_components, 32, NULL);
-  nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
-                    num_components, 32, NULL);
+  nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest, num_components,
+                    32);
+  nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest, num_components,
+                    32);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
@@ -88,7 +88,7 @@ lower_bitmap(nir_shader *shader, nir_builder *b,
      nir_src_for_ssa(nir_channels(b, texcoord,
                                   (1 << tex->coord_components) - 1));

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);

   /* kill if tex != 0.0.. take .x or .w channel according to format: */

@@ -136,7 +136,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
      nir_src_for_ssa(nir_channels(b, texcoord,
                                   (1 << tex->coord_components) - 1));

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   def = &tex->dest.ssa;

@@ -176,7 +176,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
   tex->src[2].src_type = nir_tex_src_coord;
   tex->src[2].src = nir_src_for_ssa(nir_channels(b, def, 0x3));

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   def_xy = &tex->dest.ssa;

@@ -190,7 +190,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(nir_channels(b, def, 0xc));

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   def_zw = &tex->dest.ssa;
@@ -78,7 +78,7 @@ nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
   tex->src[2].src = nir_src_for_ssa(nir_imm_intN_t(b, io.location - FRAG_RESULT_DATA0, 32));
   tex->src[2].src_type = nir_tex_src_texture_handle;

-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);

   nir_ssa_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);

@@ -149,7 +149,7 @@ lower_image_samples_identical_to_fragment_mask_load(nir_builder *b, nir_intrinsi
      break;
   }

-  nir_ssa_dest_init(&fmask_load->instr, &fmask_load->dest, 1, 32, NULL);
+  nir_ssa_dest_init(&fmask_load->instr, &fmask_load->dest, 1, 32);
   nir_builder_instr_insert(b, &fmask_load->instr);

   nir_ssa_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);

@@ -102,7 +102,7 @@ emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,

      nir_ssa_dest_init(&load->instr, &load->dest,
                        orig_instr->dest.ssa.num_components,
-                       orig_instr->dest.ssa.bit_size, NULL);
+                       orig_instr->dest.ssa.bit_size);
      nir_builder_instr_insert(b, &load->instr);
      *dest = &load->dest.ssa;
   } else {
@@ -141,7 +141,8 @@ try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,

   tex->texture_non_uniform = nir_intrinsic_access(load) & ACCESS_NON_UNIFORM;

-  nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex), 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+                    32);
   nir_builder_instr_insert(b, &tex->instr);

   if (tex->is_sparse) {

@@ -1160,7 +1160,7 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
          sizeof(intrin->const_index));

   nir_ssa_dest_init(&split->instr, &split->dest,
-                    intrin->dest.ssa.num_components, 32, NULL);
+                    intrin->dest.ssa.num_components, 32);
   nir_builder_instr_insert(b, &split->instr);

   res[i] = &split->dest.ssa;

@@ -1176,7 +1176,7 @@ build_vote_ieq(nir_builder *b, nir_ssa_def *x)
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_vote_ieq);
   vote->src[0] = nir_src_for_ssa(x);
   vote->num_components = x->num_components;
-  nir_ssa_dest_init(&vote->instr, &vote->dest, 1, 1, NULL);
+  nir_ssa_dest_init(&vote->instr, &vote->dest, 1, 1);
   nir_builder_instr_insert(b, &vote->instr);
   return &vote->dest.ssa;
}

@@ -1200,8 +1200,8 @@ build_scan_intrinsic(nir_builder *b, nir_intrinsic_op scan_op,
   nir_intrinsic_set_reduction_op(scan, reduction_op);
   if (scan_op == nir_intrinsic_reduce)
      nir_intrinsic_set_cluster_size(scan, cluster_size);
-  nir_ssa_dest_init(&scan->instr, &scan->dest,
-                    val->num_components, val->bit_size, NULL);
+  nir_ssa_dest_init(&scan->instr, &scan->dest, val->num_components,
+                    val->bit_size);
   nir_builder_instr_insert(b, &scan->instr);
   return &scan->dest.ssa;
}
@@ -327,8 +327,7 @@ emit_load(struct lower_io_state *state,
      load->src[0] = nir_src_for_ssa(offset);
   }

-  nir_ssa_dest_init(&load->instr, &load->dest,
-                    num_components, bit_size, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;

@@ -549,7 +548,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

-  nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
+  nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||

@@ -1476,8 +1475,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
-  nir_ssa_dest_init(&load->instr, &load->dest, num_components,
-                    bit_size, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);

   assert(bit_size % 8 == 0);

@@ -1784,8 +1782,8 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
   nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
-  nir_ssa_dest_init(&atomic->instr, &atomic->dest,
-                    1, intrin->dest.ssa.bit_size, NULL);
+  nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1,
+                    intrin->dest.ssa.bit_size);

   assert(atomic->dest.ssa.bit_size % 8 == 0);
@@ -176,7 +176,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,

   if (intr->intrinsic != nir_intrinsic_store_deref) {
      nir_ssa_dest_init(&element_intr->instr, &element_intr->dest,
-                       intr->num_components, intr->dest.ssa.bit_size, NULL);
+                       intr->num_components, intr->dest.ssa.bit_size);

      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||

@@ -52,8 +52,8 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
-                       1, intr->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+                       intr->dest.ssa.bit_size);
      chan_intr->num_components = 1;

      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));

@@ -87,8 +87,8 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
-                       1, intr->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+                       intr->dest.ssa.bit_size);
      chan_intr->num_components = 1;

      nir_intrinsic_set_align_offset(chan_intr,

@@ -351,8 +351,8 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
-                       1, intr->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+                       intr->dest.ssa.bit_size);
      chan_intr->num_components = 1;

      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);
@@ -209,7 +209,7 @@ emit_interp(nir_builder *b, nir_deref_instr **old_interp_deref,
   new_interp->num_components = interp->num_components;
   nir_ssa_dest_init(&new_interp->instr, &new_interp->dest,
                     interp->dest.ssa.num_components,
-                    interp->dest.ssa.bit_size, NULL);
+                    interp->dest.ssa.bit_size);

   nir_builder_instr_insert(b, &new_interp->instr);
   nir_store_deref(b, temp_deref, &new_interp->dest.ssa,

@@ -210,7 +210,7 @@ lower_locals_to_regs_block(nir_block *block,
      if (intrin->dest.is_ssa) {
         nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                           intrin->num_components,
-                          intrin->dest.ssa.bit_size, NULL);
+                          intrin->dest.ssa.bit_size);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  &mov->dest.dest.ssa);
      } else {

@@ -60,8 +60,7 @@ dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,

   if (info->has_dest) {
      assert(intrin->dest.is_ssa);
-     nir_ssa_dest_init(&dup->instr, &dup->dest,
-                       num_components, bit_size, NULL);
+     nir_ssa_dest_init(&dup->instr, &dup->dest, num_components, bit_size);
   } else {
      nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
   }
@@ -205,14 +205,13 @@ lower_phis_to_scalar_block(nir_block *block,

      nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
      nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
-                       phi->dest.ssa.num_components,
-                       bit_size, NULL);
+                       phi->dest.ssa.num_components, bit_size);
      vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

      for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
         nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
-                          phi->dest.ssa.bit_size, NULL);
+                          phi->dest.ssa.bit_size);

         vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);

@@ -220,7 +219,7 @@ lower_phis_to_scalar_block(nir_block *block,
         /* We need to insert a mov to grab the i'th component of src */
         nir_alu_instr *mov = nir_alu_instr_create(state->shader,
                                                   nir_op_mov);
-        nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
+        nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size);
         mov->dest.write_mask = 1;
         nir_src_copy(&mov->src[0].src, &src->src, &mov->instr);
         mov->src[0].swizzle[0] = i;

@@ -152,7 +152,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
      assert(num_srcs == 3);

      tex->dest_type = nir_intrinsic_dest_type(intrin);
-     nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+     nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
      break;
   }

@@ -165,8 +165,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
      assert(num_srcs == 2);

      tex->dest_type = nir_type_uint32;
-     nir_ssa_dest_init(&tex->instr, &tex->dest,
-                       coord_components, 32, NULL);
+     nir_ssa_dest_init(&tex->instr, &tex->dest, coord_components, 32);
      break;
   }
@@ -95,8 +95,7 @@ rewrite_dest(nir_dest *dest, void *_state)
      return true;

   list_del(&dest->reg.def_link);
-  nir_ssa_dest_init(instr, dest, reg->num_components,
-                    reg->bit_size, NULL);
+  nir_ssa_dest_init(instr, dest, reg->num_components, reg->bit_size);

   nir_phi_builder_value_set_block_def(value, instr->block, &dest->ssa);

@@ -179,7 +178,7 @@ rewrite_alu_instr(nir_alu_instr *alu, struct regs_to_ssa_state *state)
   alu->dest.write_mask = (1 << num_components) - 1;
   list_del(&alu->dest.dest.reg.def_link);
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
-                    reg->bit_size, NULL);
+                    reg->bit_size);

   nir_op vecN_op = nir_op_vec(reg->num_components);

@@ -200,7 +199,7 @@ rewrite_alu_instr(nir_alu_instr *alu, struct regs_to_ssa_state *state)
   }

   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, reg->num_components,
-                    reg->bit_size, NULL);
+                    reg->bit_size);
   nir_instr_insert(nir_after_instr(&alu->instr), &vec->instr);

   nir_phi_builder_value_set_block_def(value, alu->instr.block,

@@ -66,7 +66,7 @@ nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
   load->num_components = 1;
   nir_src_copy(&load->src[0], idx, &load->instr);
-  nir_ssa_dest_init(&load->instr, &load->dest, 1, bitsize, NULL);
+  nir_ssa_dest_init(&load->instr, &load->dest, 1, bitsize);
   nir_builder_instr_insert(b, &load->instr);
   return &load->dest.ssa;
}

@@ -117,7 +117,7 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
   } else {
      nir_ssa_dest_init(&global->instr, &global->dest,
                        intr->dest.ssa.num_components,
-                       intr->dest.ssa.bit_size, NULL);
+                       intr->dest.ssa.bit_size);

      if (is_atomic) {
         nir_src_copy(&global->src[1], &intr->src[2], &global->instr);
@@ -40,7 +40,7 @@ lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intri
      comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);

   nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-  nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
+  nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32);
   intr->const_index[0] = intrin->const_index[0];
   intr->const_index[1] = intrin->const_index[1];
   intr->src[0] = nir_src_for_ssa(comp);

@@ -117,8 +117,8 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-     nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
-                       1, intrin->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest, 1,
+                       intrin->dest.ssa.bit_size);
      chan_intrin->num_components = 1;

      /* value */

@@ -153,8 +153,8 @@ lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-     nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
-                       1, intrin->dest.ssa.bit_size, NULL);
+     nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest, 1,
+                       intrin->dest.ssa.bit_size);
      chan_intrin->num_components = 1;
      chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      nir_builder_instr_insert(b, &chan_intrin->instr);

@@ -213,7 +213,7 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
   nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
   nir_ssa_dest_init(&swizzle->instr, &swizzle->dest,
                     intrin->dest.ssa.num_components,
-                    intrin->dest.ssa.bit_size, NULL);
+                    intrin->dest.ssa.bit_size);

   if (options->lower_to_scalar && swizzle->num_components > 1) {
      return lower_subgroup_op_to_scalar(b, swizzle, options->lower_shuffle_to_32bit);

@@ -309,7 +309,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
   shuffle->src[1] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&shuffle->instr, &shuffle->dest,
                     intrin->dest.ssa.num_components,
-                    intrin->dest.ssa.bit_size, NULL);
+                    intrin->dest.ssa.bit_size);

   bool lower_to_32bit = options->lower_shuffle_to_32bit && is_shuffle;
   if (options->lower_to_scalar && shuffle->num_components > 1) {

@@ -588,7 +588,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
      nir_src_copy(&qbcst->src[0], &intrin->src[0], &qbcst->instr);
      nir_ssa_dest_init(&qbcst->instr, &qbcst->dest,
                        intrin->dest.ssa.num_components,
-                       intrin->dest.ssa.bit_size, NULL);
+                       intrin->dest.ssa.bit_size);

      nir_ssa_def *qbcst_dst = NULL;
@@ -329,7 +329,7 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
   plane_tex->sampler_index = tex->sampler_index;

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4,
-                    nir_dest_bit_size(tex->dest), NULL);
+                    nir_dest_bit_size(tex->dest));

   nir_builder_instr_insert(b, &plane_tex->instr);

@@ -824,8 +824,9 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
   txd->src[tex->num_srcs + 1].src = nir_src_for_ssa(dfdy);
   txd->src[tex->num_srcs + 1].src_type = nir_tex_src_ddy;

-  nir_ssa_dest_init(&txd->instr, &txd->dest, nir_dest_num_components(tex->dest),
-                    nir_dest_bit_size(tex->dest), NULL);
+  nir_ssa_dest_init(&txd->instr, &txd->dest,
+                    nir_dest_num_components(tex->dest),
+                    nir_dest_bit_size(tex->dest));
   nir_builder_instr_insert(b, &txd->instr);
   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
   nir_instr_remove(&tex->instr);

@@ -864,8 +865,9 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
   txl->src[tex->num_srcs - 1].src = nir_src_for_ssa(lod);
   txl->src[tex->num_srcs - 1].src_type = nir_tex_src_lod;

-  nir_ssa_dest_init(&txl->instr, &txl->dest, nir_dest_num_components(tex->dest),
-                    nir_dest_bit_size(tex->dest), NULL);
+  nir_ssa_dest_init(&txl->instr, &txl->dest,
+                    nir_dest_num_components(tex->dest),
+                    nir_dest_bit_size(tex->dest));
   nir_builder_instr_insert(b, &txl->instr);
   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
   nir_instr_remove(&tex->instr);

@@ -1176,7 +1178,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
      tex_copy->src[tex_copy->num_srcs - 1] = src;

      nir_ssa_dest_init(&tex_copy->instr, &tex_copy->dest,
-                       nir_tex_instr_dest_size(tex), 32, NULL);
+                       nir_tex_instr_dest_size(tex), 32);

      nir_builder_instr_insert(b, &tex_copy->instr);

@@ -1298,7 +1300,7 @@ nir_lower_ms_txf_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex)
   fmask_fetch->is_array = tex->is_array;
   fmask_fetch->texture_non_uniform = tex->texture_non_uniform;
   fmask_fetch->dest_type = nir_type_uint32;
-  nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32, NULL);
+  nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32);

   fmask_fetch->num_srcs = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {

@@ -1331,7 +1333,7 @@ nir_lower_samples_identical_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex
   nir_tex_instr *fmask_fetch = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
   fmask_fetch->op = nir_texop_fragment_mask_fetch_amd;
   fmask_fetch->dest_type = nir_type_uint32;
-  nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32, NULL);
+  nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32);
   nir_builder_instr_insert(b, &fmask_fetch->instr);

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
@@ -98,7 +98,7 @@ nir_lower_tex_shadow_impl(nir_builder *b, nir_instr *instr, void *options)
   }

   /* NIR expects a vec4 result from the above texture instructions */
-  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+  nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);

   nir_ssa_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
   nir_ssa_def *cmp = tex->src[comp_index].src.ssa;

@@ -610,7 +610,7 @@ rename_variables(struct lower_variables_state *state)
   mov->dest.write_mask = (1 << intrin->num_components) - 1;
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                     intrin->num_components,
-                    intrin->dest.ssa.bit_size, NULL);
+                    intrin->dest.ssa.bit_size);

   nir_instr_insert_before(&intrin->instr, &mov->instr);
   nir_instr_remove(&intrin->instr);

@@ -202,7 +202,7 @@ rewrite_compare_instruction(nir_builder *bld, nir_alu_instr *orig_cmp,
   mov_add->dest.write_mask = orig_add->dest.write_mask;
   nir_ssa_dest_init(&mov_add->instr, &mov_add->dest.dest,
                     orig_add->dest.dest.ssa.num_components,
-                    orig_add->dest.dest.ssa.bit_size, NULL);
+                    orig_add->dest.dest.ssa.bit_size);
   mov_add->src[0].src = nir_src_for_ssa(fadd);

   nir_builder_instr_insert(bld, &mov_add->instr);

@@ -211,7 +211,7 @@ rewrite_compare_instruction(nir_builder *bld, nir_alu_instr *orig_cmp,
   mov_cmp->dest.write_mask = orig_cmp->dest.write_mask;
   nir_ssa_dest_init(&mov_cmp->instr, &mov_cmp->dest.dest,
                     orig_cmp->dest.dest.ssa.num_components,
-                    orig_cmp->dest.dest.ssa.bit_size, NULL);
+                    orig_cmp->dest.dest.ssa.bit_size);
   mov_cmp->src[0].src = nir_src_for_ssa(cmp);

   nir_builder_instr_insert(bld, &mov_cmp->instr);
@@ -522,8 +522,8 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
   nir_phi_instr_add_src(phi, prev_block, nir_src_for_ssa(prev_value));
   nir_phi_instr_add_src(phi, continue_block, nir_src_for_ssa(alu_copy));

-  nir_ssa_dest_init(&phi->instr, &phi->dest,
-                    alu_copy->num_components, alu_copy->bit_size, NULL);
+  nir_ssa_dest_init(&phi->instr, &phi->dest, alu_copy->num_components,
+                    alu_copy->bit_size);

   b->cursor = nir_after_phis(header_block);
   nir_builder_instr_insert(b, &phi->instr);

@@ -684,11 +684,9 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
                  nir_phi_get_src_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
                                             continue_block)->src);

-  nir_ssa_dest_init(&phi->instr,
-                    &phi->dest,
-                    nir_dest_num_components(bcsel->dest.dest),
-                    nir_dest_bit_size(bcsel->dest.dest),
-                    NULL);
+  nir_ssa_dest_init(&phi->instr, &phi->dest,
+                    nir_dest_num_components(bcsel->dest.dest),
+                    nir_dest_bit_size(bcsel->dest.dest));

   b->cursor = nir_after_phis(header_block);
   nir_builder_instr_insert(b, &phi->instr);

@@ -1208,7 +1206,7 @@ clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,

   nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
                     alu->dest.dest.ssa.num_components,
-                    alu->dest.dest.ssa.bit_size, NULL);
+                    alu->dest.dest.ssa.bit_size);

   nalu->dest.saturate = alu->dest.saturate;
   nalu->dest.write_mask = alu->dest.write_mask;
@@ -461,8 +461,7 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
      }

      nir_ssa_dest_init(&sel->instr, &sel->dest.dest,
-                       phi->dest.ssa.num_components,
-                       phi->dest.ssa.bit_size, NULL);
+                       phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
      sel->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,

@@ -227,8 +227,7 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
   nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
   nir_ssa_dest_init(&new_phi->instr, &new_phi->dest,
                     phi->dest.ssa.num_components,
-                    nir_alu_type_get_type_size(nir_op_infos[op].output_type),
-                    NULL);
+                    nir_alu_type_get_type_size(nir_op_infos[op].output_type));

   /* Push the conversion into the new phi sources: */
   nir_foreach_phi_src (src, phi) {

@@ -379,8 +378,7 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
   /* construct replacement phi instruction: */
   nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
   nir_ssa_dest_init(&new_phi->instr, &new_phi->dest,
-                    phi->dest.ssa.num_components,
-                    bit_size, NULL);
+                    phi->dest.ssa.num_components, bit_size);

   /* Remove the widening conversions from the phi sources: */
   nir_foreach_phi_src (src, phi) {
@@ -275,7 +275,8 @@ optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)

   nir_ssa_def old_result = intrin->dest.ssa;
   list_replace(&intrin->dest.ssa.uses, &old_result.uses);
-  nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, intrin->dest.ssa.bit_size, NULL);
+  nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1,
+                    intrin->dest.ssa.bit_size);

   nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);

@@ -206,8 +206,8 @@ instr_try_combine(struct set *instr_set, nir_instr *instr1, nir_instr *instr2)
   b.cursor = nir_after_instr(instr1);

   nir_alu_instr *new_alu = nir_alu_instr_create(b.shader, alu1->op);
-  nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest,
-                    total_components, alu1->dest.dest.ssa.bit_size, NULL);
+  nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest, total_components,
+                    alu1->dest.dest.ssa.bit_size);
   new_alu->dest.write_mask = (1 << total_components) - 1;
   new_alu->instr.pass_flags = alu1->instr.pass_flags;

@@ -233,7 +233,7 @@ nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
       */
      nir_phi_instr *phi = nir_phi_instr_create(val->builder->shader);
      nir_ssa_dest_init(&phi->instr, &phi->dest, val->num_components,
-                       val->bit_size, NULL);
+                       val->bit_size);
      phi->instr.block = dom;
      exec_list_push_tail(&val->phis, &phi->instr.node);
      def = &phi->dest.ssa;
@@ -126,8 +126,8 @@ repair_ssa_def(nir_ssa_def *def, void *void_state)
      cast->parent = nir_src_for_ssa(block_def);
      cast->cast.ptr_stride = nir_deref_instr_array_stride(deref);

-     nir_ssa_dest_init(&cast->instr, &cast->dest,
-                       def->num_components, def->bit_size, NULL);
+     nir_ssa_dest_init(&cast->instr, &cast->dest, def->num_components,
+                       def->bit_size);
      nir_instr_insert(nir_before_instr(src->parent_instr),
                       &cast->instr);
      block_def = &cast->dest.ssa;

@@ -451,7 +451,7 @@ construct_value(nir_builder *build,

   nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
-                    dst_bit_size, NULL);
+                    dst_bit_size);
   alu->dest.write_mask = (1 << num_components) - 1;
   alu->dest.saturate = false;

@@ -771,7 +771,7 @@ read_dest(read_ctx *ctx, nir_dest *dst, nir_instr *instr,
         num_components = blob_read_uint32(ctx->blob);
      else
         num_components = decode_num_components_in_3bits(dest.ssa.num_components);
-     nir_ssa_dest_init(instr, dst, num_components, bit_size, NULL);
+     nir_ssa_dest_init(instr, dst, num_components, bit_size);
      dst->ssa.divergent = dest.ssa.divergent;
      read_add_object(ctx, &dst->ssa);
   } else {
@@ -228,8 +228,7 @@ split_phi(nir_builder *b, nir_phi_instr *phi)

   nir_alu_instr *vec = nir_alu_instr_create(b->shader, vec_op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
-                    phi->dest.ssa.num_components,
-                    64, NULL);
+                    phi->dest.ssa.num_components, 64);
   vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

   int num_comp[2] = {2, phi->dest.ssa.num_components - 2};

@@ -239,7 +238,7 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
   for (unsigned i = 0; i < 2; i++) {
      new_phi[i] = nir_phi_instr_create(b->shader);
      nir_ssa_dest_init(&new_phi[i]->instr, &new_phi[i]->dest, num_comp[i],
-                       phi->dest.ssa.bit_size, NULL);
+                       phi->dest.ssa.bit_size);

      nir_foreach_phi_src(src, phi) {
         /* Insert at the end of the predecessor but before the jump

@@ -227,8 +227,8 @@ convert_loop_exit_for_ssa(nir_ssa_def *def, void *void_state)

   /* Initialize a phi-instruction */
   nir_phi_instr *phi = nir_phi_instr_create(state->shader);
-  nir_ssa_dest_init(&phi->instr, &phi->dest,
-                    def->num_components, def->bit_size, "LCSSA-phi");
+  nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
+                    def->bit_size);

   /* Create a phi node with as many sources pointing to the same ssa_def as
    * the block has predecessors.

@@ -253,8 +253,7 @@ convert_loop_exit_for_ssa(nir_ssa_def *def, void *void_state)
      cast->cast.ptr_stride = nir_deref_instr_array_stride(instr);

      nir_ssa_dest_init(&cast->instr, &cast->dest,
-                       phi->dest.ssa.num_components,
-                       phi->dest.ssa.bit_size, NULL);
+                       phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
      nir_instr_insert(nir_after_phis(state->block_after_loop), &cast->instr);
      dest = &cast->dest.ssa;
   }
@ -51,8 +51,8 @@ nir_phi_instr *create_one_source_phi(nir_shader *shader, nir_block *pred,
|
|||
{
|
||||
nir_phi_instr *phi = nir_phi_instr_create(shader);
|
||||
nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
|
||||
nir_ssa_dest_init(&phi->instr, &phi->dest,
|
||||
def->num_components, def->bit_size, NULL);
|
||||
nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
|
||||
def->bit_size);
|
||||
|
||||
return phi;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -185,7 +185,7 @@ nir_load_store_vectorize_test::get_resource(uint32_t binding, bool ssbo)
 nir_intrinsic_instr *res = nir_intrinsic_instr_create(
 b->shader, nir_intrinsic_vulkan_resource_index);
-nir_ssa_dest_init(&res->instr, &res->dest, 1, 32, NULL);
+nir_ssa_dest_init(&res->instr, &res->dest, 1, 32);
 res->num_components = 1;
 res->src[0] = nir_src_for_ssa(nir_imm_zero(b, 1, 32));
 nir_intrinsic_set_desc_type(

@@ -220,7 +220,7 @@ nir_load_store_vectorize_test::create_indirect_load(
 return NULL;
 }
 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, intrinsic);
-nir_ssa_dest_init(&load->instr, &load->dest, components, bit_size, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, components, bit_size);
 load->num_components = components;
 if (res) {
 load->src[0] = nir_src_for_ssa(res);

@@ -281,7 +281,7 @@ nir_load_store_vectorize_test::create_indirect_store(
 return;
 }
 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, intrinsic);
-nir_ssa_dest_init(&store->instr, &store->dest, components, bit_size, NULL);
+nir_ssa_dest_init(&store->instr, &store->dest, components, bit_size);
 store->num_components = components;
 if (res) {
 store->src[0] = nir_src_for_ssa(value);

@@ -1222,7 +1222,7 @@ TEST_F(nir_load_store_vectorize_test, shared_load_distant_64bit)
 {
 nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var");
 nir_deref_instr *deref = nir_build_deref_var(b, var);
-nir_ssa_dest_init(&deref->instr, &deref->dest, 1, 64, NULL);
+nir_ssa_dest_init(&deref->instr, &deref->dest, 1, 64);
 create_shared_load(nir_build_deref_array_imm(b, deref, 0x100000000), 0x1);
 create_shared_load(nir_build_deref_array_imm(b, deref, 0x200000001), 0x2);
@@ -83,9 +83,8 @@ loop_builder(nir_builder *b, loop_builder_param p)
 nir_loop *loop = nir_push_loop(b);
 {
-nir_ssa_dest_init(&phi->instr, &phi->dest,
-ssa_0->num_components, ssa_0->bit_size,
-NULL);
+nir_ssa_dest_init(&phi->instr, &phi->dest, ssa_0->num_components,
+ssa_0->bit_size);
 nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
 nir_src_for_ssa(ssa_0));

@@ -154,9 +153,8 @@ loop_builder_invert(nir_builder *b, loop_builder_invert_param p)
 nir_loop *loop = nir_push_loop(b);
 {
-nir_ssa_dest_init(&phi->instr, &phi->dest,
-ssa_0->num_components, ssa_0->bit_size,
-NULL);
+nir_ssa_dest_init(&phi->instr, &phi->dest, ssa_0->num_components,
+ssa_0->bit_size);
 nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
 nir_src_for_ssa(ssa_0));

@@ -116,7 +116,7 @@ loop_unroll_test_helper(nir_builder *bld, nir_ssa_def *init,
 nir_block *head_block = nir_loop_first_block(loop);
 nir_phi_instr *phi = nir_phi_instr_create(bld->shader);
-nir_ssa_dest_init(&phi->instr, &phi->dest, 1, 32, NULL);
+nir_ssa_dest_init(&phi->instr, &phi->dest, 1, 32);
 nir_phi_instr_add_src(phi, top_block, nir_src_for_ssa(init));
@@ -58,8 +58,8 @@ nir_phi_instr *create_one_source_phi(nir_shader *shader, nir_block *pred,
 nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
-nir_ssa_dest_init(&phi->instr, &phi->dest,
-def->num_components, def->bit_size, NULL);
+nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
+def->bit_size);
 return phi;
 }

@@ -69,7 +69,7 @@ nir_mod_analysis_test::nir_imul_vec2y(nir_builder *b, nir_ssa_def *src0, nir_ssa
 instr->src[1].src = nir_src_for_ssa(src1);
 instr->src[1].swizzle[0] = 1;
-nir_ssa_dest_init(&instr->instr, &instr->dest.dest, 1, 32, NULL);
+nir_ssa_dest_init(&instr->instr, &instr->dest.dest, 1, 32);
 instr->dest.write_mask = 1;
 nir_builder_instr_insert(b, &instr->instr);

@@ -126,7 +126,7 @@ TEST_F(nir_opt_if_test, opt_if_simplification_single_source_phi_after_if)
 nir_phi_instr_add_src(phi, then_block, nir_src_for_ssa(one));
 nir_ssa_dest_init(&phi->instr, &phi->dest,
-one->num_components, one->bit_size, NULL);
+one->num_components, one->bit_size);
 nir_builder_instr_insert(&bld, &phi->instr);

@@ -148,7 +148,7 @@ TEST_F(nir_opt_if_test, opt_if_alu_of_phi_progress)
 nir_loop *loop = nir_push_loop(&bld);
 {
 nir_ssa_dest_init(&phi->instr, &phi->dest,
-x->num_components, x->bit_size, NULL);
+x->num_components, x->bit_size);
 nir_phi_instr_add_src(phi, x->parent_instr->block, nir_src_for_ssa(x));
@@ -300,8 +300,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple)
 nir_loop *loop = nir_push_loop(&bld);
 nir_ssa_dest_init(&phi->instr, &phi->dest,
-v->num_components, v->bit_size,
-NULL);
+v->num_components, v->bit_size);
 nir_phi_instr_add_src(phi, v->parent_instr->block,
 nir_src_for_ssa(v));

@@ -410,8 +409,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle)
 nir_loop *loop = nir_push_loop(&bld);
 nir_ssa_dest_init(&phi->instr, &phi->dest,
-v->num_components, v->bit_size,
-NULL);
+v->num_components, v->bit_size);
 nir_phi_instr_add_src(phi, v->parent_instr->block,
 nir_src_for_ssa(v));

@@ -521,8 +519,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out)
 nir_loop *loop = nir_push_loop(&bld);
 nir_ssa_dest_init(&phi->instr, &phi->dest,
-v->num_components, v->bit_size,
-NULL);
+v->num_components, v->bit_size);
 nir_phi_instr_add_src(phi, v->parent_instr->block,
 nir_src_for_ssa(v));
@@ -3195,7 +3195,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
 instr->dest_type = dest_type;
 nir_ssa_dest_init(&instr->instr, &instr->dest,
-nir_tex_instr_dest_size(instr), 32, NULL);
+nir_tex_instr_dest_size(instr), 32);
 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
 nir_tex_instr_result_size(instr));

@@ -3676,8 +3676,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 bit_size = MIN2(bit_size, 32);
 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
-nir_intrinsic_dest_components(intrin),
-bit_size, NULL);
+nir_intrinsic_dest_components(intrin), bit_size);
 nir_builder_instr_insert(&b->nb, &intrin->instr);

@@ -3942,12 +3941,11 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
 if (opcode == SpvOpAtomicFlagTestAndSet) {
 /* map atomic flag to a 32-bit atomic integer. */
-nir_ssa_dest_init(&atomic->instr, &atomic->dest,
-1, 32, NULL);
+nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32);
 } else {
 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
 glsl_get_vector_elements(type->type),
-glsl_get_bit_size(type->type), NULL);
+glsl_get_bit_size(type->type));
 vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
 }

@@ -3967,8 +3965,7 @@ create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
 {
 nir_op op = nir_op_vec(num_components);
 nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
-nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
-bit_size, NULL);
+nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, bit_size);
 vec->dest.write_mask = (1 << num_components) - 1;
 return vec;

@@ -5761,7 +5758,7 @@ vtn_handle_ray_intrinsic(struct vtn_builder *b, SpvOp opcode,
 nir_intrinsic_report_ray_intersection);
 intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
 intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
-nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
+nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1);
 nir_builder_instr_insert(&b->nb, &intrin->instr);
 vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
 break;
@@ -214,7 +214,7 @@ vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *
 intrin->num_components = glsl_get_vector_elements(deref->type);
 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
 glsl_get_vector_elements(deref->type),
-glsl_get_bit_size(deref->type), NULL);
+glsl_get_bit_size(deref->type));
 nir_builder_instr_insert(&b->nb, &intrin->instr);

@@ -683,7 +683,7 @@ handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
 intrin->num_components = glsl_get_vector_elements(deref->type);
 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
 glsl_get_vector_elements(deref->type),
-glsl_get_bit_size(deref->type), NULL);
+glsl_get_bit_size(deref->type));
 nir_builder_instr_insert(&b->nb, &intrin->instr);

@@ -97,7 +97,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
 nir_intrinsic_instr *ballot =
 nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_ballot);
 ballot->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[3 + has_scope]));
-nir_ssa_dest_init(&ballot->instr, &ballot->dest, 4, 32, NULL);
+nir_ssa_dest_init(&ballot->instr, &ballot->dest, 4, 32);
 ballot->num_components = 4;
 nir_builder_instr_insert(&b->nb, &ballot->instr);
 vtn_push_nir_ssa(b, w[2], &ballot->dest.ssa);
@@ -251,7 +251,7 @@ vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
 nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
 nir_ssa_dest_init(&instr->instr, &instr->dest,
 nir_address_format_num_components(addr_format),
-nir_address_format_bit_size(addr_format), NULL);
+nir_address_format_bit_size(addr_format));
 instr->num_components = instr->dest.ssa.num_components;
 nir_builder_instr_insert(&b->nb, &instr->instr);

@@ -274,7 +274,7 @@ vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
 nir_ssa_dest_init(&instr->instr, &instr->dest,
 nir_address_format_num_components(addr_format),
-nir_address_format_bit_size(addr_format), NULL);
+nir_address_format_bit_size(addr_format));
 instr->num_components = instr->dest.ssa.num_components;
 nir_builder_instr_insert(&b->nb, &instr->instr);

@@ -296,7 +296,7 @@ vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
 nir_address_format_num_components(addr_format),
-nir_address_format_bit_size(addr_format), NULL);
+nir_address_format_bit_size(addr_format));
 desc_load->num_components = desc_load->dest.ssa.num_components;
 nir_builder_instr_insert(&b->nb, &desc_load->instr);
@@ -146,7 +146,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
 load->num_components = 2;
 load->src[offset_src_idx] = nir_src_for_ssa(off);
-nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, 2, 32);
 nir_builder_instr_insert(b, &load->instr);
 components[i] = nir_pack_64_2x32(b, &load->dest.ssa);

@@ -224,7 +224,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
 assert(intrinsic->dest.is_ssa);
 nir_ssa_def *dest = &intrinsic->dest.ssa;
 nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
-dest->num_components, dest->bit_size, NULL);
+dest->num_components, dest->bit_size);
 new_dest = &new_intrinsic->dest.ssa;
 }

@@ -36,7 +36,7 @@ nir_lower_layer_id(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
 .num_slots = 1,
 };
 nir_intrinsic_set_io_semantics(load_input, semantics);
-nir_ssa_dest_init(&load_input->instr, &load_input->dest, 1, 32, NULL);
+nir_ssa_dest_init(&load_input->instr, &load_input->dest, 1, 32);
 nir_builder_instr_insert(b, &load_input->instr);
 nir_ssa_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa);
 return true;
@@ -176,8 +176,8 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
 new_intr->num_components = intr->num_components;
 if (nir_intrinsic_infos[op].has_dest)
-nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, intr->num_components,
-intr->dest.ssa.bit_size, NULL);
+nir_ssa_dest_init(&new_intr->instr, &new_intr->dest,
+intr->num_components, intr->dest.ssa.bit_size);
 nir_builder_instr_insert(b, &new_intr->instr);

@@ -93,7 +93,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
 load->num_components = c;
 load->src[0] = nir_src_for_ssa(addr);
 nir_intrinsic_set_align(load, nir_intrinsic_align(intr), 0);
-nir_ssa_dest_init(&load->instr, &load->dest, c, bit_size, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, c, bit_size);
 nir_builder_instr_insert(b, &load->instr);
 addr = nir_iadd(b,

@@ -623,7 +623,7 @@ build_blit_fs_shader(bool zscale)
 tex->src[0].src = nir_src_for_ssa(nir_load_var(b, in_coords));
 tex->coord_components = coord_components;
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 nir_builder_instr_insert(b, &tex->instr);
 nir_store_var(b, out_color, &tex->dest.ssa, 0xf);

@@ -682,7 +682,7 @@ build_ms_copy_fs_shader(void)
 tex->src[1].src_type = nir_tex_src_ms_index;
 tex->src[1].src = nir_src_for_ssa(nir_load_sample_id(b));
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 nir_builder_instr_insert(b, &tex->instr);
 nir_store_var(b, out_color, &tex->dest.ssa, 0xf);
@@ -322,8 +322,7 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
 if (info->has_dest) {
 nir_ssa_dest_init(&copy->instr, &copy->dest,
 intrin->dest.ssa.num_components,
-intrin->dest.ssa.bit_size,
-NULL);
+intrin->dest.ssa.bit_size);
 results[i] = &copy->dest.ssa;
 }

@@ -79,7 +79,7 @@ nir_lower_pstipple_block(nir_block *block,
 tex->sampler_index = state->stip_tex->data.binding;
 tex->src[0].src_type = nir_tex_src_coord;
 tex->src[0].src = nir_src_for_ssa(texcoord);
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 nir_builder_instr_insert(b, &tex->instr);

@@ -766,7 +766,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
 }
 load->src[srcn++] = nir_src_for_ssa(offset);
-nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, 4, 32);
 nir_builder_instr_insert(b, &load->instr);
 src = nir_src_for_ssa(&load->dest.ssa);
@@ -1574,8 +1574,7 @@ ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
 assert(src_number == instr->num_srcs);
 nir_ssa_dest_init(&instr->instr, &instr->dest,
-nir_tex_instr_dest_size(instr),
-32, NULL);
+nir_tex_instr_dest_size(instr), 32);
 nir_builder_instr_insert(b, &instr->instr);
 /* Resolve the writemask on the texture op. */

@@ -1635,11 +1634,11 @@ ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
 txs->src[1].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
 txs->src[1].src_type = nir_tex_src_lod;
-nir_ssa_dest_init(&txs->instr, &txs->dest,
-nir_tex_instr_dest_size(txs), 32, NULL);
+nir_ssa_dest_init(&txs->instr, &txs->dest, nir_tex_instr_dest_size(txs),
+32);
 nir_builder_instr_insert(b, &txs->instr);
-nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32, NULL);
+nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32);
 nir_builder_instr_insert(b, &qlv->instr);
 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);

@@ -1797,7 +1796,7 @@ ttn_mem(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
 nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
-32, NULL);
+32);
 nir_builder_instr_insert(b, &instr->instr);
 ttn_move_dest(b, dest, &instr->dest.ssa);
 } else {
@@ -497,8 +497,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
 nir_intrinsic_set_range(load_ubo, ~0);
 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
 intrin->dest.ssa.num_components,
-intrin->dest.ssa.bit_size,
-NULL);
+intrin->dest.ssa.bit_size);
 nir_builder_instr_insert(&b, &load_ubo->instr);
 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,

@@ -636,7 +635,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
 nir_intrinsic_set_align(load, 4, 0);
 nir_intrinsic_set_range_base(load, 0);
 nir_intrinsic_set_range(load, ~0);
-nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, comps, 32);
 nir_builder_instr_insert(&b, &load->instr);
 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
 &load->dest.ssa);

@@ -681,7 +681,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip)
 txs->is_array = false;
 txs->dest_type = nir_type_int;
-nir_ssa_dest_init(&txs->instr, &txs->dest, 2, 32, "tex");
+nir_ssa_dest_init(&txs->instr, &txs->dest, 2, 32);
 nir_builder_instr_insert(&b, &txs->instr);
 pos_src = nir_vec4(&b,

@@ -709,7 +709,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip)
 tex->is_array = false;
 tex->coord_components = 2;
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 nir_builder_instr_insert(&b, &tex->instr);
 nir_store_var(&b, stencil_out, nir_channel(&b, &tex->dest.ssa, 1), 0x1);
@@ -720,7 +720,7 @@ insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
 }
 mov->dest.write_mask = (1 << num_components) - 1;
-nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);
+nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32);
 /* replace vec srcs with inserted mov */
 for (unsigned i = 0, j = 0; i < 4; i++) {

@@ -134,7 +134,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
 vec->src[i].src = nir_src_for_ssa(src1->ssa);
 vec->dest.write_mask = 0xf;
-nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);
+nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32);
 nir_tex_instr_remove_src(tex, src1_idx);
 nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));

@@ -190,7 +190,7 @@ etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
 mul->src[1].swizzle[0] = 1;
 mul->dest.write_mask = 1;
-nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
+nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32);
 ssa->num_components = 2;
@@ -47,8 +47,8 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr,
 else
 dupl->src[0].reg = itr->src[0].reg;
-nir_ssa_dest_init(&dupl->instr, &dupl->dest,
-dupl->num_components, itr->dest.ssa.bit_size, NULL);
+nir_ssa_dest_init(&dupl->instr, &dupl->dest, dupl->num_components,
+itr->dest.ssa.bit_size);
 dupl->instr.pass_flags = 1;
 nir_builder_instr_insert(b, &dupl->instr);

@@ -80,8 +80,8 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr,
 else
 dupl->src[0].reg = itr->src[0].reg;
-nir_ssa_dest_init(&dupl->instr, &dupl->dest,
-dupl->num_components, itr->dest.ssa.bit_size, NULL);
+nir_ssa_dest_init(&dupl->instr, &dupl->dest, dupl->num_components,
+itr->dest.ssa.bit_size);
 dupl->instr.pass_flags = 1;
 nir_builder_instr_insert(b, &dupl->instr);

@@ -35,8 +35,8 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 for (unsigned i = 0; i < intr->num_components; i++) {
 nir_intrinsic_instr *chan_intr =
 nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
-1, intr->dest.ssa.bit_size, NULL);
+nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+intr->dest.ssa.bit_size);
 chan_intr->num_components = 1;
 nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr) * 4 + i);
@@ -76,9 +76,7 @@ lima_nir_split_load_input_instr(nir_builder *b,
 b->shader,
 intrin->intrinsic);
 nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest,
-nir_dest_num_components(alu->dest.dest),
-ssa->bit_size,
-NULL);
+nir_dest_num_components(alu->dest.dest), ssa->bit_size);
 new_intrin->num_components = nir_dest_num_components(alu->dest.dest);
 nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin));
 nir_intrinsic_set_component(new_intrin, nir_intrinsic_component(intrin) + swizzle);

@@ -1112,7 +1112,7 @@ LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
 }
 auto fetch_sample = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
-nir_ssa_dest_init(&fetch_sample->instr, &fetch_sample->dest, 4, 32, "sample_index");
+nir_ssa_dest_init(&fetch_sample->instr, &fetch_sample->dest, 4, 32);
 int used_coord_mask = 0;
 nir_ssa_def *backend1 = prep_src(new_coord, used_coord_mask);
@@ -550,7 +550,8 @@ r600_lower_shared_io_impl(nir_function *func)
 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_local_shared_r600);
 load->num_components = nir_dest_num_components(op->dest);
 load->src[0] = nir_src_for_ssa(addr);
-nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
+32);
 nir_ssa_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
 nir_builder_instr_insert(&b, &load->instr);
 } else {

@@ -601,11 +602,9 @@ r600_lower_fs_pos_input_impl(nir_builder *b, nir_instr *instr, void *_options)
 (void)_options;
 auto old_ir = nir_instr_as_intrinsic(instr);
 auto load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
-nir_ssa_dest_init(&load->instr,
-&load->dest,
+nir_ssa_dest_init(&load->instr, &load->dest,
 old_ir->dest.ssa.num_components,
-old_ir->dest.ssa.bit_size,
-NULL);
+old_ir->dest.ssa.bit_size);
 nir_intrinsic_set_io_semantics(load, nir_intrinsic_io_semantics(old_ir));
 nir_intrinsic_set_base(load, nir_intrinsic_base(old_ir));
@@ -249,9 +249,9 @@ class LowerSplit64op : public NirLowerInstruction {
 auto phi_lo = nir_phi_instr_create(b->shader);
 auto phi_hi = nir_phi_instr_create(b->shader);
 nir_ssa_dest_init(
-&phi_lo->instr, &phi_lo->dest, phi->dest.ssa.num_components * 2, 32, "");
+&phi_lo->instr, &phi_lo->dest, phi->dest.ssa.num_components * 2, 32);
 nir_ssa_dest_init(
-&phi_hi->instr, &phi_hi->dest, phi->dest.ssa.num_components * 2, 32, "");
+&phi_hi->instr, &phi_hi->dest, phi->dest.ssa.num_components * 2, 32);
 nir_foreach_phi_src(s, phi)
 {
 auto lo = nir_unpack_32_2x16_split_x(b, nir_ssa_for_src(b, s->src, 1));

@@ -590,7 +590,7 @@ LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
 nir_intrinsic_set_range(load2, nir_intrinsic_range(intr));
 load2->num_components = second_components;
-nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
 nir_builder_instr_insert(b, &load2->instr);
 intr->dest.ssa.num_components = intr->num_components = 2;

@@ -618,7 +618,7 @@ LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
 auto new_src0 = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
 nir_instr_rewrite_src(&load2->instr, &load2->src[0], new_src0);
 load2->num_components = second_components;
-nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
 nir_intrinsic_set_dest_type(load2, nir_intrinsic_dest_type(intr));
 nir_builder_instr_insert(b, &load2->instr);

@@ -644,7 +644,7 @@ LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
 load2->num_components = second_components;
-nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
 nir_builder_instr_insert(b, &load2->instr);
 intr->dest.ssa.num_components = intr->num_components = 2;
@@ -490,7 +490,7 @@ NirLowerFSOutToVector::create_combined_vector(nir_builder *b,
 k += s->num_components;
 }
-nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_comp, 32, NULL);
+nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_comp, 32);
 instr->dest.write_mask = (1 << num_comp) - 1;
 nir_builder_instr_insert(b, &instr->instr);
 return &instr->dest.dest.ssa;

@@ -55,7 +55,7 @@ static nir_ssa_def *
 emit_load_param_base(nir_builder *b, nir_intrinsic_op op)
 {
 nir_intrinsic_instr *result = nir_intrinsic_instr_create(b->shader, op);
-nir_ssa_dest_init(&result->instr, &result->dest, 4, 32, NULL);
+nir_ssa_dest_init(&result->instr, &result->dest, 4, 32);
 nir_builder_instr_insert(b, &result->instr);
 return &result->dest.ssa;
 }

@@ -286,7 +286,7 @@ r600_load_rel_patch_id(nir_builder *b)
 {
 auto patch_id =
 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_rel_patch_id_r600);
-nir_ssa_dest_init(&patch_id->instr, &patch_id->dest, 1, 32, NULL);
+nir_ssa_dest_init(&patch_id->instr, &patch_id->dest, 1, 32);
 nir_builder_instr_insert(b, &patch_id->instr);
 return &patch_id->dest.ssa;
 }
@@ -454,7 +454,7 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum pipe_prim_type pr
 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
 tf->num_components = ncomps;
 tf->src[0] = nir_src_for_ssa(addr_outer);
-nir_ssa_dest_init(&tf->instr, &tf->dest, tf->num_components, 32, NULL);
+nir_ssa_dest_init(&tf->instr, &tf->dest, tf->num_components, 32);
 nir_builder_instr_insert(b, &tf->instr);
 if (ncomps < 4) {
 auto undef = nir_ssa_undef(b, 1, 32);

@@ -551,7 +551,7 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum pipe_prim_type prim_type)
 auto invocation_id =
 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_invocation_id);
-nir_ssa_dest_init(&invocation_id->instr, &invocation_id->dest, 1, 32, NULL);
+nir_ssa_dest_init(&invocation_id->instr, &invocation_id->dest, 1, 32);
 nir_builder_instr_insert(b, &invocation_id->instr);
 nir_push_if(b, nir_ieq_imm(b, &invocation_id->dest.ssa, 0));

@@ -566,14 +566,14 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum pipe_prim_type prim_type)
 tf_outer->num_components = outer_comps;
 tf_outer->src[0] = nir_src_for_ssa(addr_outer);
 nir_ssa_dest_init(
-&tf_outer->instr, &tf_outer->dest, tf_outer->num_components, 32, NULL);
+&tf_outer->instr, &tf_outer->dest, tf_outer->num_components, 32);
 nir_builder_instr_insert(b, &tf_outer->instr);
 std::vector<nir_ssa_def *> tf_out;
 auto tf_out_base =
 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_tess_factor_base_r600);
-nir_ssa_dest_init(&tf_out_base->instr, &tf_out_base->dest, 1, 32, NULL);
+nir_ssa_dest_init(&tf_out_base->instr, &tf_out_base->dest, 1, 32);
 nir_builder_instr_insert(b, &tf_out_base->instr);
 auto out_addr0 = nir_build_alu(b,

@@ -619,7 +619,7 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum pipe_prim_type prim_type)
 tf_inner->num_components = inner_comps;
 tf_inner->src[0] = nir_src_for_ssa(addr1);
 nir_ssa_dest_init(
-&tf_inner->instr, &tf_inner->dest, tf_inner->num_components, 32, NULL);
+&tf_inner->instr, &tf_inner->dest, tf_inner->num_components, 32);
 nir_builder_instr_insert(b, &tf_inner->instr);
 tf_out.push_back(nir_vec2(b,
@@ -149,8 +149,8 @@ r600_create_new_load(nir_builder *b,
 assert(intr->dest.is_ssa);
 nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-nir_ssa_dest_init(
-&new_intr->instr, &new_intr->dest, num_comps, intr->dest.ssa.bit_size, NULL);
+nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, num_comps,
+intr->dest.ssa.bit_size);
 new_intr->num_components = num_comps;
 nir_deref_instr *deref = nir_build_deref_var(b, var);

@@ -303,7 +303,7 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
 nir_intrinsic_instr_create(c->s, intr->intrinsic);
 intr_comp->num_components = 1;
 nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
-intr->dest.ssa.bit_size, NULL);
+intr->dest.ssa.bit_size);
 /* Convert the uniform offset to bytes. If it happens
 * to be a constant, constant-folding will clean up

@@ -119,7 +119,7 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
 txf->src[0].src_type = nir_tex_src_coord;
 txf->src[0].src = nir_src_for_ssa(nir_vec2(b, addr, nir_imm_int(b, 0)));
-nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32, NULL);
+nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32);
 nir_builder_instr_insert(b, &txf->instr);
 return &txf->dest.ssa;
@@ -237,7 +237,7 @@ lower_basevertex_instr(nir_builder *b, nir_instr *in, void *data)
 load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_MODE_IS_INDEXED));
 nir_intrinsic_set_range(load, 4);
 load->num_components = 1;
-nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, "draw_mode_is_indexed");
+nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
 nir_builder_instr_insert(b, &load->instr);
 nir_ssa_def *composite = nir_build_alu(b, nir_op_bcsel,

@@ -278,7 +278,7 @@ lower_drawid_instr(nir_builder *b, nir_instr *in, void *data)
 load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_ID));
 nir_intrinsic_set_range(load, 4);
 load->num_components = 1;
-nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, "draw_id");
+nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
 nir_builder_instr_insert(b, &load->instr);
 nir_ssa_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa);

@@ -1736,7 +1736,7 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data)
 nir_src_copy(&levels->src[!!(offset_idx >= 0)].src, &txf->src[handle_idx].src, &levels->instr);
 }
 nir_ssa_dest_init(&levels->instr, &levels->dest,
-nir_tex_instr_dest_size(levels), 32, NULL);
+nir_tex_instr_dest_size(levels), 32);
 nir_builder_instr_insert(b, &levels->instr);
 nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->dest.ssa));

@@ -2340,7 +2340,8 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
 for (unsigned i = 0; i < num_components; i++) {
 nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
 nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op);
-nir_ssa_dest_init(&new_instr->instr, &new_instr->dest, 1, nir_dest_bit_size(intr->dest), "");
+nir_ssa_dest_init(&new_instr->instr, &new_instr->dest, 1,
+nir_dest_bit_size(intr->dest));
 nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr));
 new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa);
 /* deref ops have no offset src, so copy the srcs after it */
@@ -176,7 +176,8 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
 }
 nir_ssa_dest_init(&array_tex->instr, &array_tex->dest,
-nir_tex_instr_dest_size(array_tex), nir_dest_bit_size(tex->dest), NULL);
+nir_tex_instr_dest_size(array_tex),
+nir_dest_bit_size(tex->dest));
 nir_builder_instr_insert(b, &array_tex->instr);
 return &array_tex->dest.ssa;
 }

@@ -445,8 +446,9 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
 txl->src[s].src_type = nir_tex_src_lod;
 b->cursor = nir_before_instr(&tex->instr);
-nir_ssa_dest_init(&txl->instr, &txl->dest, nir_dest_num_components(tex->dest),
-nir_dest_bit_size(tex->dest), NULL);
+nir_ssa_dest_init(&txl->instr, &txl->dest,
+nir_dest_num_components(tex->dest),
+nir_dest_bit_size(tex->dest));
 nir_builder_instr_insert(b, &txl->instr);
 nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
 return txl;

@@ -67,11 +67,8 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,
 for (uint8_t i = 0; i < intr->num_components; i++) {
 nir_intrinsic_instr *chan_intr =
 nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-nir_ssa_dest_init(&chan_intr->instr,
-&chan_intr->dest,
-1,
-intr->dest.ssa.bit_size,
-NULL);
+nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+intr->dest.ssa.bit_size);
 chan_intr->num_components = 1;
 nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
@@ -166,7 +166,7 @@ blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v,
 tex->src[0].src = nir_src_for_ssa(pos);
 tex->coord_components = 3;
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 return tex;
 }

@@ -62,7 +62,7 @@ blorp_nir_txf_ms_mcs(nir_builder *b, nir_ssa_def *xy_pos, nir_ssa_def *layer)
 tex->texture_index = 0;
 tex->sampler_index = 0;
-nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 nir_builder_instr_insert(b, &tex->instr);
 return &tex->dest.ssa;

@@ -114,7 +114,7 @@ implement_sub_group_ballot_builtin(nir_function *func)
 nir_intrinsic_instr_create(b.shader, nir_intrinsic_ballot);
 ballot->src[0] = nir_src_for_ssa(cond);
 ballot->num_components = 1;
-nir_ssa_dest_init(&ballot->instr, &ballot->dest, 1, 32, NULL);
+nir_ssa_dest_init(&ballot->instr, &ballot->dest, 1, 32);
 nir_builder_instr_insert(&b, &ballot->instr);
 nir_store_deref(&b, ret, &ballot->dest.ssa, ~0);

@@ -189,7 +189,7 @@ lower_kernel_intrinsics(nir_shader *nir)
 nir_intrinsic_set_range(load, nir->num_uniforms);
 nir_ssa_dest_init(&load->instr, &load->dest,
 intrin->dest.ssa.num_components,
-intrin->dest.ssa.bit_size, NULL);
+intrin->dest.ssa.bit_size);
 nir_builder_instr_insert(&b, &load->instr);
 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);

@@ -217,7 +217,7 @@ lower_kernel_intrinsics(nir_shader *nir)
 nir_intrinsic_set_base(load, kernel_sysvals_start +
 offsetof(struct brw_kernel_sysvals, num_work_groups));
 nir_intrinsic_set_range(load, 3 * 4);
-nir_ssa_dest_init(&load->instr, &load->dest, 3, 32, NULL);
+nir_ssa_dest_init(&load->instr, &load->dest, 3, 32);
 nir_builder_instr_insert(&b, &load->instr);
 /* We may need to do a bit-size cast here */
Some files were not shown because too many files have changed in this diff.
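For downstream NIR passes being ported across this change, every hunk above follows the same pattern: the trailing name argument is simply dropped. A minimal sketch, assuming an existing nir_builder *b; the intrinsic chosen here is only illustrative:

   /* Build a one-component, 32-bit sysval load; any instruction with an
    * SSA destination is initialized the same way. */
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_invocation_id);
   /* Previously: nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL); */
   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32);
   nir_builder_instr_insert(b, &instr->instr);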