diff --git a/src/intel/compiler/brw_builder.h b/src/intel/compiler/brw_builder.h
index 947513f6bc3..32064b10356 100644
--- a/src/intel/compiler/brw_builder.h
+++ b/src/intel/compiler/brw_builder.h
@@ -96,6 +96,42 @@ public:
       return bld;
    }
 
+   brw_builder
+   at_start(bblock_t *block) const
+   {
+      brw_builder bld = *this;
+      bld.block = block;
+      bld.cursor = block->instructions.head_sentinel.next;
+      return bld;
+   }
+
+   brw_builder
+   at_end(bblock_t *block) const
+   {
+      brw_builder bld = *this;
+      bld.block = block;
+      bld.cursor = &block->instructions.tail_sentinel;
+      return bld;
+   }
+
+   brw_builder
+   before(brw_inst *ref) const
+   {
+      brw_builder bld = *this;
+      bld.block = ref->block;
+      bld.cursor = ref;
+      return bld;
+   }
+
+   brw_builder
+   after(brw_inst *ref) const
+   {
+      brw_builder bld = *this;
+      bld.block = ref->block;
+      bld.cursor = ref->next;
+      return bld;
+   }
+
    /**
     * Construct a builder specifying the default SIMD width and group of
     * channel enable signals, inheriting other code generation parameters
diff --git a/src/intel/compiler/brw_from_nir.cpp b/src/intel/compiler/brw_from_nir.cpp
index 58be1b25f34..455383ae106 100644
--- a/src/intel/compiler/brw_from_nir.cpp
+++ b/src/intel/compiler/brw_from_nir.cpp
@@ -3813,11 +3813,11 @@ emit_is_helper_invocation(nir_to_brw_state &ntb, brw_reg result)
 
       brw_inst *mov = b.MOV(offset(result, b, i), brw_imm_ud(~0));
 
-      /* The at() ensures that any code emitted to get the predicate happens
+      /* The before() ensures that any code emitted to get the predicate happens
        * before the mov right above. This is not an issue elsewhere because
        * lowering code already set up the builder this way.
        */
-      brw_emit_predicate_on_sample_mask(b.at(NULL, mov), mov);
+      brw_emit_predicate_on_sample_mask(b.before(mov), mov);
       mov->predicate_inverse = true;
    }
 }
diff --git a/src/intel/compiler/brw_lower.cpp b/src/intel/compiler/brw_lower.cpp
index 73e7d6b2b0f..bf4a2ab2c2c 100644
--- a/src/intel/compiler/brw_lower.cpp
+++ b/src/intel/compiler/brw_lower.cpp
@@ -323,7 +323,7 @@ brw_lower_barycentrics(brw_shader &s)
 
       for (unsigned i = 0; i < 2; i++) {
          for (unsigned g = 0; g < inst->exec_size / 8; g++) {
-            brw_inst *mov = ibld.at(block, inst->next).group(8, g)
+            brw_inst *mov = ibld.after(inst).group(8, g)
                             .MOV(horiz_offset(offset(inst->dst, ibld, i), 8 * g),
                                  offset(tmp, ubld, 2 * g + i));
diff --git a/src/intel/compiler/brw_lower_regioning.cpp b/src/intel/compiler/brw_lower_regioning.cpp
index f2418f948be..3f2ca520dda 100644
--- a/src/intel/compiler/brw_lower_regioning.cpp
+++ b/src/intel/compiler/brw_lower_regioning.cpp
@@ -563,7 +563,7 @@ namespace {
         tmp = horiz_stride(tmp, stride);
 
      /* Emit a MOV taking care of all the destination modifiers. */
-      brw_inst *mov = ibld.at(inst->block, inst->next).MOV(inst->dst, tmp);
+      brw_inst *mov = ibld.after(inst).MOV(inst->dst, tmp);
      mov->saturate = inst->saturate;
      if (!has_inconsistent_cmod(inst))
         mov->conditional_mod = inst->conditional_mod;
@@ -697,8 +697,8 @@ namespace {
      }
 
      for (unsigned j = 0; j < n; j++) {
-         brw_inst *jnst = ibld.at(inst->block, inst->next).MOV(subscript(inst->dst, raw_type, j),
-                                                               subscript(tmp, raw_type, j));
+         brw_inst *jnst = ibld.after(inst).MOV(subscript(inst->dst, raw_type, j),
+                                               subscript(tmp, raw_type, j));
         if (has_subdword_integer_region_restriction(v->devinfo, jnst)) {
            /* The copy isn't guaranteed to comply with all subdword integer
             * regioning restrictions in some cases. Lower it recursively.
diff --git a/src/intel/compiler/brw_lower_simd_width.cpp b/src/intel/compiler/brw_lower_simd_width.cpp
index 1c97937545f..9c3b2fe1955 100644
--- a/src/intel/compiler/brw_lower_simd_width.cpp
+++ b/src/intel/compiler/brw_lower_simd_width.cpp
@@ -664,7 +664,7 @@ brw_lower_simd_width(brw_shader &s)
          /* Builder matching the original instruction. */
          const brw_builder bld = brw_builder(&s);
          const brw_builder ibld =
-            bld.at(block, inst).exec_all(inst->force_writemask_all)
+            bld.before(inst).exec_all(inst->force_writemask_all)
                .group(inst->exec_size, inst->group / inst->exec_size);
 
          /* Split the copies in chunks of the execution width of either the
@@ -742,15 +742,15 @@ brw_lower_simd_width(brw_shader &s)
             const brw_builder lbld = ibld.group(lower_width, i);
 
             for (unsigned j = 0; j < inst->sources; j++)
-               split_inst.src[j] = emit_unzip(lbld.at(block, inst), inst, j);
+               split_inst.src[j] = emit_unzip(lbld.before(inst), inst, j);
 
-            split_inst.dst = emit_zip(lbld.at(block, inst),
+            split_inst.dst = emit_zip(lbld.before(inst),
                                       lbld.at(block, after_inst), inst);
             split_inst.size_written =
                split_inst.dst.component_size(lower_width) * dst_size +
               residency_size;
 
-            lbld.at(block, inst->next).emit(split_inst);
+            lbld.after(inst).emit(split_inst);
          }
 
          inst->remove();
diff --git a/src/intel/compiler/brw_opt_address_reg_load.cpp b/src/intel/compiler/brw_opt_address_reg_load.cpp
index f08822d5695..54df987dae8 100644
--- a/src/intel/compiler/brw_opt_address_reg_load.cpp
+++ b/src/intel/compiler/brw_opt_address_reg_load.cpp
@@ -37,7 +37,7 @@ opt_address_reg_load_local(brw_shader &s, bblock_t *block, const brw_def_analysi
           src_inst->sources > 2)
         continue;
 
-      brw_builder ubld = brw_builder(&s).at(block, inst).uniform();
+      brw_builder ubld = brw_builder(&s).before(inst).uniform();
 
      brw_reg sources[3];
      for (unsigned i = 0; i < src_inst->sources; i++) {
         sources[i] = inst->src[i].file == VGRF ?
            component(src_inst->src[i], 0) : src_inst->src[i];
diff --git a/src/intel/compiler/brw_opt_txf_combiner.cpp b/src/intel/compiler/brw_opt_txf_combiner.cpp
index fab2c3e5e3e..4d73ad9c3e1 100644
--- a/src/intel/compiler/brw_opt_txf_combiner.cpp
+++ b/src/intel/compiler/brw_opt_txf_combiner.cpp
@@ -151,7 +151,7 @@ brw_opt_combine_convergent_txf(brw_shader &s)
       const unsigned lanes = CLAMP(count - curr, min_simd, max_simd);
       const unsigned width = util_next_power_of_two(lanes);
       const brw_builder ubld =
-         brw_builder(&s).at(block, txfs[curr]).exec_all().group(width, 0);
+         brw_builder(&s).before(txfs[curr]).exec_all().group(width, 0);
       const brw_builder ubld1 = ubld.group(1, 0);
 
       enum brw_reg_type coord_type =
diff --git a/src/intel/compiler/brw_reg_allocate.cpp b/src/intel/compiler/brw_reg_allocate.cpp
index 68b432efac7..650e45f58df 100644
--- a/src/intel/compiler/brw_reg_allocate.cpp
+++ b/src/intel/compiler/brw_reg_allocate.cpp
@@ -1292,7 +1292,7 @@ brw_reg_alloc::spill_reg(unsigned spill_reg)
 
             emit_unspill(ubld, &fs->shader_stats, spill_src,
                          subset_spill_offset, regs_written(inst), ip);
-            emit_spill(ubld.at(block, inst->next), &fs->shader_stats, spill_src,
+            emit_spill(ubld.after(inst), &fs->shader_stats, spill_src,
                        subset_spill_offset, regs_written(inst), ip);
          }
diff --git a/src/intel/compiler/brw_shader.cpp b/src/intel/compiler/brw_shader.cpp
index 1100492c4c3..ded1cef6157 100644
--- a/src/intel/compiler/brw_shader.cpp
+++ b/src/intel/compiler/brw_shader.cpp
@@ -671,8 +671,7 @@ brw_shader::assign_curb_setup()
               gl_shader_stage_is_mesh(stage)) &&
              brw_cs_prog_data(prog_data)->uses_inline_push_addr);
       assert(devinfo->has_lsc);
-      brw_builder ubld = brw_builder(this, 1).exec_all().at(
-         cfg->first_block(), cfg->first_block()->start());
+      brw_builder ubld = brw_builder(this, 1).exec_all().at_start(cfg->first_block());
 
       brw_reg base_addr;
       if (pull_constants_a64) {
@@ -812,8 +811,7 @@ brw_shader::assign_curb_setup()
 
    uint64_t want_zero = used & prog_data->zero_push_reg;
    if (want_zero) {
-      brw_builder ubld = brw_builder(this, 8).exec_all().at(
-         cfg->first_block(), cfg->first_block()->start());
+      brw_builder ubld = brw_builder(this, 8).exec_all().at_start(cfg->first_block());
 
       /* push_reg_mask_param is in 32-bit units */
       unsigned mask_param = prog_data->push_reg_mask_param;
diff --git a/src/intel/compiler/brw_workaround.cpp b/src/intel/compiler/brw_workaround.cpp
index afbd89cf80d..92fd303bf1b 100644
--- a/src/intel/compiler/brw_workaround.cpp
+++ b/src/intel/compiler/brw_workaround.cpp
@@ -260,7 +260,7 @@ brw_workaround_nomask_control_flow(brw_shader &s)
             inst->predicate_trivial = true;
 
             if (save_flag)
-               ubld.group(1, 0).at(block, inst->next).MOV(flag, tmp);
+               ubld.group(1, 0).after(inst).MOV(flag, tmp);
 
             progress = true;
          }
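Illustrative note (not part of the patch): all four helpers only reposition the builder's insertion cursor, which, as the new definitions indicate, points at the node the next emitted instruction is inserted before. at_start() points it at head_sentinel.next, at_end() at the tail sentinel, before(ref) at ref itself, and after(ref) at ref->next. Below is a minimal, self-contained C++ sketch of that cursor model; Inst, Builder and main() are hypothetical stand-ins built on std::list, not the real brw_builder/bblock_t/exec_list types.

#include <cassert>
#include <iterator>
#include <list>
#include <string>

/* Hypothetical stand-ins; the real types live in brw_builder.h / brw_cfg.h. */
struct Inst { std::string name; };

struct Builder {
   std::list<Inst> *insts;
   std::list<Inst>::iterator cursor;   /* new instructions go before this node */

   /* at_start(): first instruction (head_sentinel.next in the real builder). */
   static Builder at_start(std::list<Inst> &l) { return {&l, l.begin()}; }
   /* at_end(): tail sentinel, i.e. append. */
   static Builder at_end(std::list<Inst> &l) { return {&l, l.end()}; }
   /* before(ref)/after(ref): the reference node itself, or the one past it. */
   static Builder before(std::list<Inst> &l, std::list<Inst>::iterator ref) { return {&l, ref}; }
   static Builder after(std::list<Inst> &l, std::list<Inst>::iterator ref) { return {&l, std::next(ref)}; }

   void emit(const std::string &n) { insts->insert(cursor, Inst{n}); }
};

int main()
{
   std::list<Inst> block = {{"mul"}, {"add"}};
   auto add = std::next(block.begin());

   Builder::before(block, add).emit("mov.pre");   /* lands between mul and add */
   Builder::after(block, add).emit("mov.post");   /* lands right after add */
   Builder::at_start(block).emit("prologue");     /* becomes the first instruction */
   Builder::at_end(block).emit("epilogue");       /* appended at the end */

   assert(block.front().name == "prologue");
   assert(block.back().name == "epilogue");
   return 0;
}

With that model in mind, the mechanical substitutions in the patch read directly: at(block, inst) becomes before(inst), at(block, inst->next) becomes after(inst), and at(cfg->first_block(), cfg->first_block()->start()) becomes at_start(cfg->first_block()).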