intel/brw: Rename fs_builder to brw_builder

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33076>
Commit 5ac82efd35 (parent f2d4c9db92)
Caio Oliveira, 2024-12-29 15:41:04 -08:00; committed by Marge Bot
34 changed files with 391 additions and 391 deletions
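
As an aside, a minimal sketch of what the rename means at a typical call site (hypothetical fs_visitor s and registers dst/src; only the type name changes, the builder API is untouched):

   /* Before this commit:
    *    const fs_builder bld = fs_builder(&s).at_end();
    * After: */
   const brw_builder bld = brw_builder(&s).at_end();
   bld.MOV(dst, src);   /* unchanged: instructions still append at the end of the shader */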


@ -28,21 +28,21 @@
#include "brw_eu.h"
#include "brw_fs.h"
static inline brw_reg offset(const brw_reg &, const brw::fs_builder &,
static inline brw_reg offset(const brw_reg &, const brw::brw_builder &,
unsigned);
namespace brw {
/**
* Toolbox to assemble an FS IR program out of individual instructions.
* Toolbox to assemble a BRW IR program out of individual instructions.
*/
class fs_builder {
class brw_builder {
public:
/**
* Construct an fs_builder that inserts instructions into \p shader.
* Construct a brw_builder that inserts instructions into \p shader.
* \p dispatch_width gives the native execution width of the program.
*/
fs_builder(fs_visitor *shader,
unsigned dispatch_width) :
brw_builder(fs_visitor *shader,
unsigned dispatch_width) :
shader(shader), block(NULL), cursor(NULL),
_dispatch_width(dispatch_width),
_group(0),
@ -51,15 +51,15 @@ namespace brw {
{
}
explicit fs_builder(fs_visitor *s) : fs_builder(s, s->dispatch_width) {}
explicit brw_builder(fs_visitor *s) : brw_builder(s, s->dispatch_width) {}
/**
* Construct an fs_builder that inserts instructions into \p shader
* Construct a brw_builder that inserts instructions into \p shader
* before instruction \p inst in basic block \p block. The default
* execution controls and debug annotation are initialized from the
* instruction passed as argument.
*/
fs_builder(fs_visitor *shader, bblock_t *block, fs_inst *inst) :
brw_builder(fs_visitor *shader, bblock_t *block, fs_inst *inst) :
shader(shader), block(block), cursor(inst),
_dispatch_width(inst->exec_size),
_group(inst->group),
@ -73,25 +73,25 @@ namespace brw {
}
/**
* Construct an fs_builder that inserts instructions before \p cursor in
* Construct a brw_builder that inserts instructions before \p cursor in
* basic block \p block, inheriting other code generation parameters
* from this.
*/
fs_builder
brw_builder
at(bblock_t *block, exec_node *cursor) const
{
fs_builder bld = *this;
brw_builder bld = *this;
bld.block = block;
bld.cursor = cursor;
return bld;
}
/**
* Construct an fs_builder appending instructions at the end of the
* Construct a brw_builder appending instructions at the end of the
* instruction list of the shader, inheriting other code generation
* parameters from this.
*/
fs_builder
brw_builder
at_end() const
{
return at(NULL, (exec_node *)&shader->instructions.tail_sentinel);
@ -105,10 +105,10 @@ namespace brw {
* \p n gives the default SIMD width, \p i gives the slot group used for
* predication and control flow masking in multiples of \p n channels.
*/
fs_builder
brw_builder
group(unsigned n, unsigned i) const
{
fs_builder bld = *this;
brw_builder bld = *this;
if (n <= dispatch_width() && i < dispatch_width() / n) {
bld._group += i * n;
@ -133,7 +133,7 @@ namespace brw {
/**
* Alias for group() with width equal to eight.
*/
fs_builder
brw_builder
quarter(unsigned i) const
{
return group(8, i);
@ -144,10 +144,10 @@ namespace brw {
* disabled if \p b is true. If control flow execution masking is
* already disabled this has no effect.
*/
fs_builder
brw_builder
exec_all(bool b = true) const
{
fs_builder bld = *this;
brw_builder bld = *this;
if (b)
bld.force_writemask_all = true;
return bld;
@ -156,7 +156,7 @@ namespace brw {
/**
* Construct a builder for SIMD8-as-scalar
*/
fs_builder
brw_builder
scalar_group() const
{
return exec_all().group(8 * reg_unit(shader->devinfo), 0);
@ -165,10 +165,10 @@ namespace brw {
/**
* Construct a builder with the given debug annotation info.
*/
fs_builder
brw_builder
annotate(const char *str) const
{
fs_builder bld = *this;
brw_builder bld = *this;
bld.annotation.str = str;
return bld;
}
@ -400,7 +400,7 @@ namespace brw {
* send). Once we teach const/copy propagation about scalars we
* should go back to scalar destinations here.
*/
const fs_builder xbld = scalar_group();
const brw_builder xbld = scalar_group();
const brw_reg chan_index = xbld.vgrf(BRW_TYPE_UD);
/* FIND_LIVE_CHANNEL will only write a single component after
@ -813,7 +813,7 @@ namespace brw {
brw_reg
BROADCAST(brw_reg value, brw_reg index) const
{
const fs_builder xbld = scalar_group();
const brw_builder xbld = scalar_group();
const brw_reg dst = xbld.vgrf(value.type);
assert(is_uniform(index));
@ -931,7 +931,7 @@ namespace brw {
* stored differently, so care must be taken to offset properly.
*/
static inline brw_reg
offset(const brw_reg &reg, const brw::fs_builder &bld, unsigned delta)
offset(const brw_reg &reg, const brw::brw_builder &bld, unsigned delta)
{
/* If the value is convergent (stored as one or more SIMD8), offset using
* SIMD8 and select component 0.
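
Taken together, the constructors and the group()/exec_all()/annotate() helpers above return modified copies, so builders compose by chaining. A hedged sketch of typical use (hypothetical fs_visitor s):

   const brw_builder bld = brw_builder(&s).at_end();
   /* One-channel builder with execution masking disabled, e.g. for header setup: */
   const brw_builder ubld1 = bld.exec_all().group(1, 0).annotate("setup header");
   brw_reg tmp = ubld1.vgrf(BRW_TYPE_UD);
   ubld1.MOV(tmp, brw_imm_ud(0));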


@ -64,13 +64,13 @@ static bool
run_cs(fs_visitor &s, bool allow_spilling)
{
assert(gl_shader_stage_is_compute(s.stage));
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
s.payload_ = new cs_thread_payload(s);
if (s.devinfo->platform == INTEL_PLATFORM_HSW && s.prog_data->total_shared > 0) {
/* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
const fs_builder abld = bld.exec_all().group(1, 0);
const brw_builder abld = bld.exec_all().group(1, 0);
abld.MOV(retype(brw_sr0_reg(1), BRW_TYPE_UW),
suboffset(retype(brw_vec1_grf(0, 0), BRW_TYPE_UW), 1));
}


@ -21,7 +21,7 @@
using namespace brw;
static fs_inst *
brw_emit_single_fb_write(fs_visitor &s, const fs_builder &bld,
brw_emit_single_fb_write(fs_visitor &s, const brw_builder &bld,
brw_reg color0, brw_reg color1,
brw_reg src0_alpha, unsigned components,
bool null_rt)
@ -61,7 +61,7 @@ brw_emit_single_fb_write(fs_visitor &s, const fs_builder &bld,
static void
brw_do_emit_fb_writes(fs_visitor &s, int nr_color_regions, bool replicate_alpha)
{
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
fs_inst *inst = NULL;
for (int target = 0; target < nr_color_regions; target++) {
@ -69,7 +69,7 @@ brw_do_emit_fb_writes(fs_visitor &s, int nr_color_regions, bool replicate_alpha)
if (s.outputs[target].file == BAD_FILE)
continue;
const fs_builder abld = bld.annotate(
const brw_builder abld = bld.annotate(
ralloc_asprintf(s.mem_ctx, "FB write target %d", target));
brw_reg src0_alpha;
@ -184,8 +184,8 @@ static void
brw_emit_interpolation_setup(fs_visitor &s)
{
const struct intel_device_info *devinfo = s.devinfo;
const fs_builder bld = fs_builder(&s).at_end();
fs_builder abld = bld.annotate("compute pixel centers");
const brw_builder bld = brw_builder(&s).at_end();
brw_builder abld = bld.annotate("compute pixel centers");
s.pixel_x = bld.vgrf(BRW_TYPE_F);
s.pixel_y = bld.vgrf(BRW_TYPE_F);
@ -255,7 +255,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
*/
struct brw_reg r1_0 = retype(brw_vec1_reg(FIXED_GRF, 1, 0), BRW_TYPE_UB);
const fs_builder dbld =
const brw_builder dbld =
abld.exec_all().group(MIN2(16, s.dispatch_width) * 2, 0);
if (devinfo->verx10 >= 125) {
@ -305,7 +305,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
break;
case INTEL_SOMETIMES: {
const fs_builder dbld =
const brw_builder dbld =
abld.exec_all().group(MIN2(16, s.dispatch_width) * 2, 0);
check_dynamic_msaa_flag(dbld, wm_prog_data,
@ -353,7 +353,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
}
for (unsigned i = 0; i < DIV_ROUND_UP(s.dispatch_width, 16); i++) {
const fs_builder hbld = abld.group(MIN2(16, s.dispatch_width), i);
const brw_builder hbld = abld.group(MIN2(16, s.dispatch_width), i);
/* According to the "PS Thread Payload for Normal Dispatch"
* pages on the BSpec, subspan X/Y coordinates are stored in
* R1.2-R1.5/R2.2-R2.5 on gfx6+, and on R0.10-R0.13/R1.10-R1.13
@ -365,7 +365,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
const struct brw_reg gi_uw = retype(gi_reg, BRW_TYPE_UW);
if (devinfo->verx10 >= 125) {
const fs_builder dbld =
const brw_builder dbld =
abld.exec_all().group(hbld.dispatch_width() * 2, 0);
const brw_reg int_pixel_x = dbld.vgrf(BRW_TYPE_UW);
const brw_reg int_pixel_y = dbld.vgrf(BRW_TYPE_UW);
@ -402,7 +402,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
* Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16
* to compute our pixel centers.
*/
const fs_builder dbld =
const brw_builder dbld =
abld.exec_all().group(hbld.dispatch_width() * 2, 0);
brw_reg int_pixel_xy = dbld.vgrf(BRW_TYPE_UW);
@ -511,7 +511,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
if (wm_key->persample_interp == INTEL_SOMETIMES) {
assert(!devinfo->needs_unlit_centroid_workaround);
const fs_builder ubld = bld.exec_all().group(16, 0);
const brw_builder ubld = bld.exec_all().group(16, 0);
bool loaded_flag = false;
for (int i = 0; i < INTEL_BARYCENTRIC_MODE_COUNT; ++i) {
@ -623,7 +623,7 @@ brw_emit_repclear_shader(fs_visitor &s)
BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
bld.exec_all().group(4, 0).MOV(color_output, color_input);
if (key->nr_color_regions > 1) {
@ -1460,7 +1460,7 @@ run_fs(fs_visitor &s, bool allow_spilling, bool do_rep_send)
const struct intel_device_info *devinfo = s.devinfo;
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
brw_wm_prog_key *wm_key = (brw_wm_prog_key *) s.key;
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
const nir_shader *nir = s.nir;
assert(s.stage == MESA_SHADER_FRAGMENT);


@ -42,7 +42,7 @@ brw_emit_gs_thread_end(fs_visitor &s)
s.emit_gs_control_data_bits(s.final_gs_vertex_count);
}
const fs_builder abld = fs_builder(&s).at_end().annotate("thread end");
const brw_builder abld = brw_builder(&s).at_end().annotate("thread end");
fs_inst *inst;
if (gs_prog_data->static_vertex_count != -1) {
@ -92,7 +92,7 @@ run_gs(fs_visitor &s)
s.payload_ = new gs_thread_payload(s);
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
s.final_gs_vertex_count = bld.vgrf(BRW_TYPE_UD);
@ -105,7 +105,7 @@ run_gs(fs_visitor &s)
* Otherwise, we need to initialize it to 0 here.
*/
if (s.gs_compile->control_data_header_size_bits <= 32) {
const fs_builder abld = bld.annotate("initialize control data bits");
const brw_builder abld = bld.annotate("initialize control data bits");
abld.MOV(s.control_data_bits, brw_imm_ud(0u));
}
}


@ -268,7 +268,7 @@ brw_nir_align_launch_mesh_workgroups(nir_shader *nir)
static void
brw_emit_urb_fence(fs_visitor &s)
{
const fs_builder bld1 = fs_builder(&s).at_end().exec_all().group(1, 0);
const brw_builder bld1 = brw_builder(&s).at_end().exec_all().group(1, 0);
brw_reg dst = bld1.vgrf(BRW_TYPE_UD);
fs_inst *fence = bld1.emit(SHADER_OPCODE_MEMORY_FENCE, dst,
brw_vec8_grf(0, 0),


@ -49,7 +49,7 @@ brw_set_tcs_invocation_id(fs_visitor &s)
const struct intel_device_info *devinfo = s.devinfo;
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
const unsigned instance_id_mask =
(devinfo->verx10 >= 125) ? INTEL_MASK(7, 0) :
@ -99,7 +99,7 @@ brw_emit_tcs_thread_end(fs_visitor &s)
if (s.mark_last_urb_write_with_eot())
return;
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
/* Emit a URB write to end the thread. On Broadwell, we use this to write
* zero to the "TR DS Cache Disable" bit (we haven't implemented a fancy
@ -133,7 +133,7 @@ run_tcs(fs_visitor &s)
assert(s.stage == MESA_SHADER_TESS_CTRL);
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(s.prog_data);
const fs_builder bld = fs_builder(&s).at_end();
const brw_builder bld = brw_builder(&s).at_end();
assert(vue_prog_data->dispatch_mode == INTEL_DISPATCH_MODE_TCS_SINGLE_PATCH ||
vue_prog_data->dispatch_mode == INTEL_DISPATCH_MODE_TCS_MULTI_PATCH);


@ -925,7 +925,7 @@ fs_visitor::assign_curb_setup()
if (is_compute && devinfo->verx10 >= 125 && uniform_push_length > 0) {
assert(devinfo->has_lsc);
fs_builder ubld = fs_builder(this, 1).exec_all().at(
brw_builder ubld = brw_builder(this, 1).exec_all().at(
cfg->first_block(), cfg->first_block()->start());
/* The base offset for our push data is passed in as R0.0[31:6]. We have
@ -1031,7 +1031,7 @@ fs_visitor::assign_curb_setup()
uint64_t want_zero = used & prog_data->zero_push_reg;
if (want_zero) {
fs_builder ubld = fs_builder(this, 8).exec_all().at(
brw_builder ubld = brw_builder(this, 8).exec_all().at(
cfg->first_block(), cfg->first_block()->start());
/* push_reg_mask_param is in 32-bit units */
@ -1048,7 +1048,7 @@ fs_visitor::assign_curb_setup()
brw_imm_v(0x01234567));
ubld.SHL(shifted, horiz_offset(shifted, 8), brw_imm_w(8));
fs_builder ubld16 = ubld.group(16, 0);
brw_builder ubld16 = ubld.group(16, 0);
b32 = ubld16.vgrf(BRW_TYPE_D);
ubld16.group(16, 0).ASR(b32, shifted, brw_imm_w(15));
}
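
A hedged reading of the SHL/ASR sequence above: it shifts the wanted mask bit into the sign position of each 16-bit lane, then arithmetic-shifts it back across the whole lane, yielding 0 or ~0 per channel. The essence in plain C++ (illustration only; lane_mask is a hypothetical helper):

   #include <cstdint>
   /* Expand bit i of a 16-bit mask into an all-zeros/all-ones lane value. */
   int16_t
   lane_mask(uint16_t mask, unsigned i)
   {
      int16_t shifted = (int16_t)(mask << (15 - i)); /* bit i -> sign bit */
      return (int16_t)(shifted >> 15);               /* arithmetic shift splats it */
   }
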
@ -1159,7 +1159,7 @@ brw_get_subgroup_id_param_index(const intel_device_info *devinfo,
* than 16 for fragment shaders.
*/
brw_reg
brw_sample_mask_reg(const fs_builder &bld)
brw_sample_mask_reg(const brw_builder &bld)
{
const fs_visitor &s = *bld.shader;
@ -1211,7 +1211,7 @@ brw_fb_write_msg_control(const fs_inst *inst,
* Predicate the specified instruction on the sample mask.
*/
void
brw_emit_predicate_on_sample_mask(const fs_builder &bld, fs_inst *inst)
brw_emit_predicate_on_sample_mask(const brw_builder &bld, fs_inst *inst)
{
assert(bld.shader->stage == MESA_SHADER_FRAGMENT &&
bld.group() == inst->group &&
@ -1602,7 +1602,7 @@ bool brw_should_print_shader(const nir_shader *shader, uint64_t debug_flag)
namespace brw {
brw_reg
fetch_payload_reg(const brw::fs_builder &bld, uint8_t regs[2],
fetch_payload_reg(const brw::brw_builder &bld, uint8_t regs[2],
brw_reg_type type, unsigned n)
{
if (!regs[0])
@ -1610,7 +1610,7 @@ namespace brw {
if (bld.dispatch_width() > 16) {
const brw_reg tmp = bld.vgrf(type, n);
const brw::fs_builder hbld = bld.exec_all().group(16, 0);
const brw::brw_builder hbld = bld.exec_all().group(16, 0);
const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
brw_reg *const components = new brw_reg[m * n];
@ -1631,7 +1631,7 @@ namespace brw {
}
brw_reg
fetch_barycentric_reg(const brw::fs_builder &bld, uint8_t regs[2])
fetch_barycentric_reg(const brw::brw_builder &bld, uint8_t regs[2])
{
if (!regs[0])
return brw_reg();
@ -1639,7 +1639,7 @@ namespace brw {
return fetch_payload_reg(bld, regs, BRW_TYPE_F, 2);
const brw_reg tmp = bld.vgrf(BRW_TYPE_F, 2);
const brw::fs_builder hbld = bld.exec_all().group(8, 0);
const brw::brw_builder hbld = bld.exec_all().group(8, 0);
const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
brw_reg *const components = new brw_reg[2 * m];
@ -1656,7 +1656,7 @@ namespace brw {
}
void
check_dynamic_msaa_flag(const fs_builder &bld,
check_dynamic_msaa_flag(const brw_builder &bld,
const struct brw_wm_prog_data *wm_prog_data,
enum intel_msaa_flags flag)
{


@ -139,7 +139,7 @@ struct brw_gs_compile
};
namespace brw {
class fs_builder;
class brw_builder;
}
struct brw_shader_stats {
@ -216,7 +216,7 @@ struct fs_thread_payload : public thread_payload {
struct cs_thread_payload : public thread_payload {
cs_thread_payload(const fs_visitor &v);
void load_subgroup_id(const brw::fs_builder &bld, brw_reg &dest) const;
void load_subgroup_id(const brw::brw_builder &bld, brw_reg &dest) const;
brw_reg local_invocation_id[3];
@ -244,7 +244,7 @@ struct bs_thread_payload : public thread_payload {
brw_reg global_arg_ptr;
brw_reg local_arg_ptr;
void load_shader_type(const brw::fs_builder &bld, brw_reg &dest) const;
void load_shader_type(const brw::brw_builder &bld, brw_reg &dest) const;
};
enum instruction_scheduler_mode {
@ -479,12 +479,12 @@ sample_mask_flag_subreg(const fs_visitor &s)
namespace brw {
brw_reg
fetch_payload_reg(const brw::fs_builder &bld, uint8_t regs[2],
fetch_payload_reg(const brw::brw_builder &bld, uint8_t regs[2],
brw_reg_type type = BRW_TYPE_F,
unsigned n = 1);
brw_reg
fetch_barycentric_reg(const brw::fs_builder &bld, uint8_t regs[2]);
fetch_barycentric_reg(const brw::brw_builder &bld, uint8_t regs[2]);
inline brw_reg
dynamic_msaa_flags(const struct brw_wm_prog_data *wm_prog_data)
@ -493,7 +493,7 @@ namespace brw {
}
void
check_dynamic_msaa_flag(const fs_builder &bld,
check_dynamic_msaa_flag(const brw_builder &bld,
const struct brw_wm_prog_data *wm_prog_data,
enum intel_msaa_flags flag);
@ -501,7 +501,7 @@ namespace brw {
lower_src_modifiers(fs_visitor *v, bblock_t *block, fs_inst *inst, unsigned i);
}
void shuffle_from_32bit_read(const brw::fs_builder &bld,
void shuffle_from_32bit_read(const brw::brw_builder &bld,
const brw_reg &dst,
const brw_reg &src,
uint32_t first_component,
@ -515,8 +515,8 @@ uint32_t brw_fb_write_msg_control(const fs_inst *inst,
void brw_compute_urb_setup_index(struct brw_wm_prog_data *wm_prog_data);
brw_reg brw_sample_mask_reg(const brw::fs_builder &bld);
void brw_emit_predicate_on_sample_mask(const brw::fs_builder &bld, fs_inst *inst);
brw_reg brw_sample_mask_reg(const brw::brw_builder &bld);
void brw_emit_predicate_on_sample_mask(const brw::brw_builder &bld, fs_inst *inst);
int brw_get_subgroup_id_param_index(const intel_device_info *devinfo,
const brw_stage_prog_data *prog_data);

(File diff suppressed because it is too large.)


@ -288,18 +288,18 @@ private:
void build_interference_graph(bool allow_spilling);
brw_reg build_ex_desc(const fs_builder &bld, unsigned reg_size, bool unspill);
brw_reg build_ex_desc(const brw_builder &bld, unsigned reg_size, bool unspill);
brw_reg build_lane_offsets(const fs_builder &bld,
uint32_t spill_offset, int ip);
brw_reg build_single_offset(const fs_builder &bld,
brw_reg build_lane_offsets(const brw_builder &bld,
uint32_t spill_offset, int ip);
brw_reg build_legacy_scratch_header(const fs_builder &bld,
brw_reg build_single_offset(const brw_builder &bld,
uint32_t spill_offset, int ip);
brw_reg build_legacy_scratch_header(const brw_builder &bld,
uint32_t spill_offset, int ip);
void emit_unspill(const fs_builder &bld, struct brw_shader_stats *stats,
void emit_unspill(const brw_builder &bld, struct brw_shader_stats *stats,
brw_reg dst, uint32_t spill_offset, unsigned count, int ip);
void emit_spill(const fs_builder &bld, struct brw_shader_stats *stats,
void emit_spill(const brw_builder &bld, struct brw_shader_stats *stats,
brw_reg src, uint32_t spill_offset, unsigned count, int ip);
void set_spill_costs();
@ -677,7 +677,7 @@ fs_reg_alloc::build_interference_graph(bool allow_spilling)
}
brw_reg
fs_reg_alloc::build_single_offset(const fs_builder &bld, uint32_t spill_offset, int ip)
fs_reg_alloc::build_single_offset(const brw_builder &bld, uint32_t spill_offset, int ip)
{
brw_reg offset = retype(alloc_spill_reg(1, ip), BRW_TYPE_UD);
fs_inst *inst = bld.MOV(offset, brw_imm_ud(spill_offset));
@ -686,7 +686,7 @@ fs_reg_alloc::build_single_offset(const fs_builder &bld, uint32_t spill_offset,
}
brw_reg
fs_reg_alloc::build_ex_desc(const fs_builder &bld, unsigned reg_size, bool unspill)
fs_reg_alloc::build_ex_desc(const brw_builder &bld, unsigned reg_size, bool unspill)
{
/* Use a different area of the address register than what is used in
* brw_lower_logical_sends.c (brw_address_reg(2)) so we don't have
@ -724,11 +724,11 @@ fs_reg_alloc::build_ex_desc(const fs_builder &bld, unsigned reg_size, bool unspi
}
brw_reg
fs_reg_alloc::build_lane_offsets(const fs_builder &bld, uint32_t spill_offset, int ip)
fs_reg_alloc::build_lane_offsets(const brw_builder &bld, uint32_t spill_offset, int ip)
{
assert(bld.dispatch_width() <= 16 * reg_unit(bld.shader->devinfo));
const fs_builder ubld = bld.exec_all();
const brw_builder ubld = bld.exec_all();
const unsigned reg_count = ubld.dispatch_width() / 8;
brw_reg offset = retype(alloc_spill_reg(reg_count, ip), BRW_TYPE_UD);
@ -776,11 +776,11 @@ fs_reg_alloc::build_lane_offsets(const fs_builder &bld, uint32_t spill_offset, i
* Generate a scratch header for pre-LSC platforms.
*/
brw_reg
fs_reg_alloc::build_legacy_scratch_header(const fs_builder &bld,
fs_reg_alloc::build_legacy_scratch_header(const brw_builder &bld,
uint32_t spill_offset, int ip)
{
const fs_builder ubld8 = bld.exec_all().group(8, 0);
const fs_builder ubld1 = bld.exec_all().group(1, 0);
const brw_builder ubld8 = bld.exec_all().group(8, 0);
const brw_builder ubld1 = bld.exec_all().group(1, 0);
/* Allocate a spill header and make it interfere with g0 */
brw_reg header = retype(alloc_spill_reg(1, ip), BRW_TYPE_UD);
@ -799,7 +799,7 @@ fs_reg_alloc::build_legacy_scratch_header(const fs_builder &bld,
}
void
fs_reg_alloc::emit_unspill(const fs_builder &bld,
fs_reg_alloc::emit_unspill(const brw_builder &bld,
struct brw_shader_stats *stats,
brw_reg dst,
uint32_t spill_offset, unsigned count, int ip)
@ -819,7 +819,7 @@ fs_reg_alloc::emit_unspill(const fs_builder &bld,
const bool use_transpose =
bld.dispatch_width() > 16 * reg_unit(devinfo) ||
bld.has_writemask_all();
const fs_builder ubld = use_transpose ? bld.exec_all().group(1, 0) : bld;
const brw_builder ubld = use_transpose ? bld.exec_all().group(1, 0) : bld;
brw_reg offset;
if (use_transpose) {
offset = build_single_offset(ubld, spill_offset, ip);
@ -898,7 +898,7 @@ fs_reg_alloc::emit_unspill(const fs_builder &bld,
}
void
fs_reg_alloc::emit_spill(const fs_builder &bld,
fs_reg_alloc::emit_spill(const brw_builder &bld,
struct brw_shader_stats *stats,
brw_reg src,
uint32_t spill_offset, unsigned count, int ip)
@ -1136,7 +1136,7 @@ fs_reg_alloc::spill_reg(unsigned spill_reg)
*/
int ip = 0;
foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
const fs_builder ibld = fs_builder(fs, block, inst);
const brw_builder ibld = brw_builder(fs, block, inst);
exec_node *before = inst->prev;
exec_node *after = inst->next;
@ -1223,7 +1223,7 @@ fs_reg_alloc::spill_reg(unsigned spill_reg)
inst->exec_size == width;
/* Builder used to emit the scratch messages. */
const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);
const brw_builder ubld = ibld.exec_all(!per_channel).group(width, 0);
/* If our write is going to affect just part of the
* regs_written(inst), then we need to unspill the destination since


@ -104,7 +104,7 @@ gs_thread_payload::gs_thread_payload(fs_visitor &v)
{
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(v.prog_data);
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(v.prog_data);
const fs_builder bld = fs_builder(&v).at_end();
const brw_builder bld = brw_builder(&v).at_end();
/* R0: thread header. */
unsigned r = reg_unit(v.devinfo);
@ -390,7 +390,7 @@ cs_thread_payload::cs_thread_payload(const fs_visitor &v)
}
void
cs_thread_payload::load_subgroup_id(const fs_builder &bld,
cs_thread_payload::load_subgroup_id(const brw_builder &bld,
brw_reg &dest) const
{
auto devinfo = bld.shader->devinfo;
@ -430,7 +430,7 @@ task_mesh_thread_payload::task_mesh_thread_payload(fs_visitor &v)
* the address to descriptors.
*/
const fs_builder bld = fs_builder(&v).at_end();
const brw_builder bld = brw_builder(&v).at_end();
unsigned r = 0;
assert(subgroup_id_.file != BAD_FILE);
@ -491,7 +491,7 @@ bs_thread_payload::bs_thread_payload(const fs_visitor &v)
}
void
bs_thread_payload::load_shader_type(const fs_builder &bld, brw_reg &dest) const
bs_thread_payload::load_shader_type(const brw_builder &bld, brw_reg &dest) const
{
brw_reg ud_dest = retype(dest, BRW_TYPE_UD);
bld.MOV(ud_dest, retype(brw_vec1_grf(0, 3), ud_dest.type));


@ -64,7 +64,7 @@ fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
unreachable("invalid stage");
}
const fs_builder bld = fs_builder(this).at_end();
const brw_builder bld = brw_builder(this).at_end();
brw_reg per_slot_offsets;
@ -202,7 +202,7 @@ fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
break;
}
const fs_builder abld = bld.annotate("URB write");
const brw_builder abld = bld.annotate("URB write");
/* If we've queued up 8 registers of payload (2 VUE slots), if this is
* the last slot or if we need to flush (see BAD_FILE varying case
@ -329,7 +329,7 @@ fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
void
fs_visitor::emit_cs_terminate()
{
const fs_builder ubld = fs_builder(this).at_end().exec_all();
const brw_builder ubld = brw_builder(this).at_end().exec_all();
/* We can't directly send from g0, since sends with EOT have to use
* g112-127. So, copy it to a virtual register. The register allocator will


@ -62,8 +62,8 @@ brw_lower_load_payload(fs_visitor &s)
assert(inst->saturate == false);
brw_reg dst = inst->dst;
const fs_builder ibld(&s, block, inst);
const fs_builder ubld = ibld.exec_all();
const brw_builder ibld(&s, block, inst);
const brw_builder ubld = ibld.exec_all();
for (uint8_t i = 0; i < inst->header_size;) {
/* Number of header GRFs to initialize at once with a single MOV
@ -155,7 +155,7 @@ brw_lower_csel(fs_visitor &s)
}
if (!supported) {
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* CSEL: dst = src2 <op> 0 ? src0 : src1 */
brw_reg zero = brw_imm_reg(orig_type);
@ -187,7 +187,7 @@ brw_lower_sub_sat(fs_visitor &s)
bool progress = false;
foreach_block_and_inst_safe(block, fs_inst, inst, s.cfg) {
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
if (inst->opcode == SHADER_OPCODE_USUB_SAT ||
inst->opcode == SHADER_OPCODE_ISUB_SAT) {
@ -295,8 +295,8 @@ brw_lower_barycentrics(fs_visitor &s)
if (inst->exec_size < 16)
continue;
const fs_builder ibld(&s, block, inst);
const fs_builder ubld = ibld.exec_all().group(8, 0);
const brw_builder ibld(&s, block, inst);
const brw_builder ubld = ibld.exec_all().group(8, 0);
switch (inst->opcode) {
case BRW_OPCODE_PLN: {
@ -355,7 +355,7 @@ static bool
lower_derivative(fs_visitor &s, bblock_t *block, fs_inst *inst,
unsigned swz0, unsigned swz1)
{
const fs_builder ubld = fs_builder(&s, block, inst).exec_all();
const brw_builder ubld = brw_builder(&s, block, inst).exec_all();
const brw_reg tmp0 = ubld.vgrf(inst->src[0].type);
const brw_reg tmp1 = ubld.vgrf(inst->src[0].type);
@ -433,11 +433,11 @@ brw_lower_find_live_channel(fs_visitor &s)
* useless there.
*/
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
if (!inst->is_partial_write())
ibld.emit_undef_for_dst(inst);
const fs_builder ubld = fs_builder(&s, block, inst).exec_all().group(1, 0);
const brw_builder ubld = brw_builder(&s, block, inst).exec_all().group(1, 0);
brw_reg exec_mask = ubld.vgrf(BRW_TYPE_UD);
ubld.UNDEF(exec_mask);
@ -529,7 +529,7 @@ brw_lower_sends_overlapping_payload(fs_visitor &s)
/* Sadly, we've lost all notion of channels and bit sizes at this
* point. Just WE_all it.
*/
const fs_builder ibld = fs_builder(&s, block, inst).exec_all().group(16, 0);
const brw_builder ibld = brw_builder(&s, block, inst).exec_all().group(16, 0);
brw_reg copy_src = retype(inst->src[arg], BRW_TYPE_UD);
brw_reg copy_dst = tmp;
for (unsigned i = 0; i < len; i += 2) {
@ -606,7 +606,7 @@ brw_lower_alu_restrictions(fs_visitor &s)
assert(!inst->saturate);
assert(!inst->src[0].abs);
assert(!inst->src[0].negate);
const brw::fs_builder ibld(&s, block, inst);
const brw::brw_builder ibld(&s, block, inst);
enum brw_reg_type type = brw_type_with_size(inst->dst.type, 32);
@ -630,7 +630,7 @@ brw_lower_alu_restrictions(fs_visitor &s)
assert(!inst->src[0].abs && !inst->src[0].negate);
assert(!inst->src[1].abs && !inst->src[1].negate);
assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
const brw::fs_builder ibld(&s, block, inst);
const brw::brw_builder ibld(&s, block, inst);
enum brw_reg_type type = brw_type_with_size(inst->dst.type, 32);
@ -766,9 +766,9 @@ brw_lower_load_subgroup_invocation(fs_visitor &s)
if (inst->opcode != SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION)
continue;
const fs_builder abld =
fs_builder(&s, block, inst).annotate("SubgroupInvocation");
const fs_builder ubld8 = abld.group(8, 0).exec_all();
const brw_builder abld =
brw_builder(&s, block, inst).annotate("SubgroupInvocation");
const brw_builder ubld8 = abld.group(8, 0).exec_all();
ubld8.UNDEF(inst->dst);
if (inst->exec_size == 8) {
@ -781,7 +781,7 @@ brw_lower_load_subgroup_invocation(fs_visitor &s)
ubld8.MOV(inst->dst, brw_imm_v(0x76543210));
ubld8.ADD(byte_offset(inst->dst, 16), inst->dst, brw_imm_uw(8u));
if (inst->exec_size > 16) {
const fs_builder ubld16 = abld.group(16, 0).exec_all();
const brw_builder ubld16 = abld.group(16, 0).exec_all();
ubld16.ADD(byte_offset(inst->dst, 32), inst->dst, brw_imm_uw(16u));
}
}
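
For context on the hunk above: brw_imm_v() packs eight 4-bit values into a single immediate, so 0x76543210 seeds lanes 0-7 with their own index, and the following ADDs extend the pattern to SIMD16/SIMD32. A standalone C++ sketch of the expansion (illustration only):

   #include <cstdio>
   int
   main()
   {
      const unsigned v = 0x76543210;                   /* packed 4-bit vector immediate */
      for (int lane = 0; lane < 8; lane++)
         std::printf("%u ", (v >> (4 * lane)) & 0xf);  /* prints 0 1 2 3 4 5 6 7 */
      return 0;
   }
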
@ -814,7 +814,7 @@ brw_lower_indirect_mov(fs_visitor &s)
assert(brw_type_size_bytes(inst->src[0].type) ==
brw_type_size_bytes(inst->dst.type));
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* Extract unaligned part */
uint16_t extra_offset = inst->src[0].offset & 0x1;


@ -9,7 +9,7 @@
using namespace brw;
static void
f16_using_mac(const fs_builder &bld, fs_inst *inst)
f16_using_mac(const brw_builder &bld, fs_inst *inst)
{
/* We only intend to support configurations where the destination and
* accumulator have the same type.
@ -115,7 +115,7 @@ f16_using_mac(const fs_builder &bld, fs_inst *inst)
}
static void
int8_using_dp4a(const fs_builder &bld, fs_inst *inst)
int8_using_dp4a(const brw_builder &bld, fs_inst *inst)
{
/* We only intend to support configurations where the destination and
* accumulator have the same type.
@ -162,7 +162,7 @@ int8_using_dp4a(const fs_builder &bld, fs_inst *inst)
}
static void
int8_using_mul_add(const fs_builder &bld, fs_inst *inst)
int8_using_mul_add(const brw_builder &bld, fs_inst *inst)
{
/* We only intend to support configurations where the destination and
* accumulator have the same type.
@ -280,7 +280,7 @@ brw_lower_dpas(fs_visitor &v)
continue;
const unsigned exec_size = v.devinfo->ver >= 20 ? 16 : 8;
const fs_builder bld = fs_builder(&v, block, inst).group(exec_size, 0).exec_all();
const brw_builder bld = brw_builder(&v, block, inst).group(exec_size, 0).exec_all();
if (brw_type_is_float(inst->dst.type)) {
f16_using_mac(bld, inst);


@ -142,7 +142,7 @@ static void
brw_lower_mul_dword_inst(fs_visitor &s, fs_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* It is correct to use inst->src[1].d in both ends of the comparison.
* Using .ud in the UINT16_MAX comparison would cause any negative value to
@ -304,7 +304,7 @@ static void
brw_lower_mul_qword_inst(fs_visitor &s, fs_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* Considering two 64-bit integers ab and cd where each letter ab
* corresponds to 32 bits, we get a 128-bit result WXYZ. We * cd
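
(The comment above continues with an ASCII long-multiplication diagram that the diff context cuts off.) The decomposition it describes, sketched in plain C++ under the convention that a:b and c:d are the high:low 32-bit halves of the two operands:

   #include <cstdint>
   /* Low 64 bits of (a:b) * (c:d). Only b*d and the cross terms matter here;
    * a*c lands entirely in the high 64 bits (the WX part of WXYZ).
    */
   uint64_t
   mul64_lo(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
   {
      const uint64_t bd    = (uint64_t)b * d;
      const uint64_t cross = (uint64_t)a * d + (uint64_t)b * c;
      return bd + (cross << 32);
   }
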
@ -373,7 +373,7 @@ static void
brw_lower_mulh_inst(fs_visitor &s, fs_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* According to the BDW+ BSpec page for the "Multiply Accumulate
* High" instruction:


@ -32,7 +32,7 @@
using namespace brw;
static void
lower_urb_read_logical_send(const fs_builder &bld, fs_inst *inst)
lower_urb_read_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const bool per_slot_present =
@ -75,7 +75,7 @@ lower_urb_read_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_urb_read_logical_send_xe2(const fs_builder &bld, fs_inst *inst)
lower_urb_read_logical_send_xe2(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
assert(devinfo->has_lsc);
@ -134,7 +134,7 @@ lower_urb_read_logical_send_xe2(const fs_builder &bld, fs_inst *inst)
}
static void
lower_urb_write_logical_send(const fs_builder &bld, fs_inst *inst)
lower_urb_write_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const bool per_slot_present =
@ -191,7 +191,7 @@ lower_urb_write_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_urb_write_logical_send_xe2(const fs_builder &bld, fs_inst *inst)
lower_urb_write_logical_send_xe2(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
assert(devinfo->has_lsc);
@ -264,7 +264,7 @@ lower_urb_write_logical_send_xe2(const fs_builder &bld, fs_inst *inst)
}
static void
setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
setup_color_payload(const brw_builder &bld, const brw_wm_prog_key *key,
brw_reg *dst, brw_reg color, unsigned components)
{
if (key->clamp_fragment_color) {
@ -283,7 +283,7 @@ setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
}
static void
lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
lower_fb_write_logical_send(const brw_builder &bld, fs_inst *inst,
const struct brw_wm_prog_data *prog_data,
const brw_wm_prog_key *key,
const fs_thread_payload &fs_payload)
@ -318,7 +318,7 @@ lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
* dispatched. This field is only required for the end-of-
* thread message and on all dual-source messages."
*/
const fs_builder ubld = bld.exec_all().group(8, 0);
const brw_builder ubld = bld.exec_all().group(8, 0);
brw_reg header = ubld.vgrf(BRW_TYPE_UD, 2);
if (bld.group() < 16) {
@ -386,8 +386,8 @@ lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
if (src0_alpha.file != BAD_FILE) {
for (unsigned i = 0; i < bld.dispatch_width() / 8; i++) {
const fs_builder &ubld = bld.exec_all().group(8, i)
.annotate("FB write src0 alpha");
const brw_builder &ubld = bld.exec_all().group(8, i)
.annotate("FB write src0 alpha");
const brw_reg tmp = ubld.vgrf(BRW_TYPE_F);
ubld.MOV(tmp, horiz_offset(src0_alpha, i * 8));
setup_color_payload(ubld, key, &sources[length], tmp, 1);
@ -474,7 +474,7 @@ lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
inst->desc |= (1 << 18);
} else if (prog_data->coarse_pixel_dispatch == INTEL_SOMETIMES) {
STATIC_ASSERT(INTEL_MSAA_FLAG_COARSE_RT_WRITES == (1 << 18));
const fs_builder &ubld = bld.exec_all().group(8, 0);
const brw_builder &ubld = bld.exec_all().group(8, 0);
desc = ubld.vgrf(BRW_TYPE_UD);
ubld.AND(desc, dynamic_msaa_flags(prog_data),
brw_imm_ud(INTEL_MSAA_FLAG_COARSE_RT_WRITES));
@ -513,11 +513,11 @@ lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
}
static void
lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst,
lower_fb_read_logical_send(const brw_builder &bld, fs_inst *inst,
const struct brw_wm_prog_data *wm_prog_data)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const fs_builder &ubld = bld.exec_all().group(8, 0);
const brw_builder &ubld = bld.exec_all().group(8, 0);
const unsigned length = 2;
const brw_reg header = ubld.vgrf(BRW_TYPE_UD, length);
@ -676,7 +676,7 @@ sampler_msg_type(const intel_device_info *devinfo,
* the given requested_alignment_sz.
*/
static fs_inst *
emit_load_payload_with_padding(const fs_builder &bld, const brw_reg &dst,
emit_load_payload_with_padding(const brw_builder &bld, const brw_reg &dst,
const brw_reg *src, unsigned sources,
unsigned header_size,
unsigned requested_alignment_sz)
@ -734,7 +734,7 @@ shader_opcode_needs_header(opcode op)
}
static void
lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst,
lower_sampler_logical_send(const brw_builder &bld, fs_inst *inst,
const brw_reg &coordinate,
const brw_reg &shadow_c,
brw_reg lod, const brw_reg &lod2,
@ -808,8 +808,8 @@ lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst,
inst->offset |= 1 << 23; /* g0.2 bit23 : Pixel Null Mask Enable */
/* Build the actual header */
const fs_builder ubld = bld.exec_all().group(8 * reg_unit(devinfo), 0);
const fs_builder ubld1 = ubld.group(1, 0);
const brw_builder ubld = bld.exec_all().group(8 * reg_unit(devinfo), 0);
const brw_builder ubld1 = ubld.group(1, 0);
if (devinfo->ver >= 11)
ubld.MOV(header, brw_imm_ud(0));
else
@ -1158,7 +1158,7 @@ lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst,
if (sampler_handle.file != BAD_FILE || sampler.file == IMM) {
inst->src[0] = brw_imm_ud(0);
} else {
const fs_builder ubld = bld.group(1, 0).exec_all();
const brw_builder ubld = bld.group(1, 0).exec_all();
brw_reg desc = ubld.vgrf(BRW_TYPE_UD);
ubld.SHL(desc, sampler, brw_imm_ud(8));
inst->src[0] = component(desc, 0);
@ -1177,7 +1177,7 @@ lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst,
msg_type,
simd_mode,
sampler_ret_type);
const fs_builder ubld = bld.group(1, 0).exec_all();
const brw_builder ubld = bld.group(1, 0).exec_all();
brw_reg desc = ubld.vgrf(BRW_TYPE_UD);
if (surface.equals(sampler)) {
/* This case is common in GL */
@ -1260,7 +1260,7 @@ get_sampler_msg_payload_type_bit_size(const intel_device_info *devinfo,
}
static void
lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst)
lower_sampler_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const brw_reg coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
@ -1303,13 +1303,13 @@ lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst)
* Predicate the specified instruction on the vector mask.
*/
static void
emit_predicate_on_vector_mask(const fs_builder &bld, fs_inst *inst)
emit_predicate_on_vector_mask(const brw_builder &bld, fs_inst *inst)
{
assert(bld.shader->stage == MESA_SHADER_FRAGMENT &&
bld.group() == inst->group &&
bld.dispatch_width() == inst->exec_size);
const fs_builder ubld = bld.exec_all().group(1, 0);
const brw_builder ubld = bld.exec_all().group(1, 0);
const fs_visitor &s = *bld.shader;
const brw_reg vector_mask = ubld.vgrf(BRW_TYPE_UW);
@ -1337,7 +1337,7 @@ emit_predicate_on_vector_mask(const fs_builder &bld, fs_inst *inst)
}
static void
setup_surface_descriptors(const fs_builder &bld, fs_inst *inst, uint32_t desc,
setup_surface_descriptors(const brw_builder &bld, fs_inst *inst, uint32_t desc,
const brw_reg &surface, const brw_reg &surface_handle)
{
const brw_compiler *compiler = bld.shader->compiler;
@ -1361,7 +1361,7 @@ setup_surface_descriptors(const fs_builder &bld, fs_inst *inst, uint32_t desc,
inst->send_ex_bso = compiler->extended_bindless_surface_offset;
} else {
inst->desc = desc;
const fs_builder ubld = bld.exec_all().group(1, 0);
const brw_builder ubld = bld.exec_all().group(1, 0);
brw_reg tmp = ubld.vgrf(BRW_TYPE_UD);
ubld.AND(tmp, surface, brw_imm_ud(0xff));
inst->src[0] = component(tmp, 0);
@ -1370,7 +1370,7 @@ setup_surface_descriptors(const fs_builder &bld, fs_inst *inst, uint32_t desc,
}
static void
setup_lsc_surface_descriptors(const fs_builder &bld, fs_inst *inst,
setup_lsc_surface_descriptors(const brw_builder &bld, fs_inst *inst,
uint32_t desc, const brw_reg &surface)
{
const ASSERTED intel_device_info *devinfo = bld.shader->devinfo;
@ -1399,7 +1399,7 @@ setup_lsc_surface_descriptors(const fs_builder &bld, fs_inst *inst,
if (surface.file == IMM) {
inst->src[1] = brw_imm_ud(lsc_bti_ex_desc(devinfo, surface.ud));
} else {
const fs_builder ubld = bld.exec_all().group(1, 0);
const brw_builder ubld = bld.exec_all().group(1, 0);
brw_reg tmp = ubld.vgrf(BRW_TYPE_UD);
ubld.SHL(tmp, surface, brw_imm_ud(24));
inst->src[1] = component(tmp, 0);
@ -1427,7 +1427,7 @@ lsc_addr_size_for_type(enum brw_reg_type type)
}
static void
lower_lsc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
lower_lsc_memory_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
assert(devinfo->has_lsc);
@ -1470,7 +1470,7 @@ lower_lsc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
if (addr.file != VGRF || !addr.is_contiguous()) {
if (inst->force_writemask_all) {
const fs_builder dbld = bld.group(bld.shader->dispatch_width, 0);
const brw_builder dbld = bld.group(bld.shader->dispatch_width, 0);
payload = dbld.move_to_vgrf(addr, coord_components);
} else {
payload = bld.move_to_vgrf(addr, coord_components);
@ -1584,16 +1584,16 @@ lower_lsc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
}
static brw_reg
emit_a64_oword_block_header(const fs_builder &bld, const brw_reg &addr)
emit_a64_oword_block_header(const brw_builder &bld, const brw_reg &addr)
{
const fs_builder ubld = bld.exec_all().group(8, 0);
const brw_builder ubld = bld.exec_all().group(8, 0);
assert(brw_type_size_bytes(addr.type) == 8 && addr.stride == 0);
brw_reg expanded_addr = addr;
if (addr.file == UNIFORM) {
/* We can't do stride 1 with the UNIFORM file, it requires stride 0 */
fs_builder ubld1 = ubld.group(1, 0);
brw_builder ubld1 = ubld.group(1, 0);
brw_reg tmp = ubld1.vgrf(BRW_TYPE_UQ);
ubld1.UNDEF(tmp);
@ -1615,7 +1615,7 @@ emit_a64_oword_block_header(const fs_builder &bld, const brw_reg &addr)
}
static void
lower_hdc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
lower_hdc_memory_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const brw_compiler *compiler = bld.shader->compiler;
@ -1681,8 +1681,8 @@ lower_hdc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
unsigned addr_size_B = coord_components * lsc_addr_size_bytes(addr_size);
brw_reg header;
fs_builder ubld8 = bld.exec_all().group(8, 0);
fs_builder ubld1 = ubld8.group(1, 0);
brw_builder ubld8 = bld.exec_all().group(8, 0);
brw_builder ubld1 = ubld8.group(1, 0);
if (mode == MEMORY_MODE_SCRATCH) {
header = ubld8.vgrf(BRW_TYPE_UD);
ubld8.emit(SHADER_OPCODE_SCRATCH_HEADER, header, brw_ud8_grf(0, 0));
@ -1906,7 +1906,7 @@ lower_hdc_memory_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_lsc_varying_pull_constant_logical_send(const fs_builder &bld,
lower_lsc_varying_pull_constant_logical_send(const brw_builder &bld,
fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
@ -1993,7 +1993,7 @@ lower_lsc_varying_pull_constant_logical_send(const fs_builder &bld,
}
static void
lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
lower_varying_pull_constant_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const brw_compiler *compiler = bld.shader->compiler;
@ -2071,7 +2071,7 @@ lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_interpolator_logical_send(const fs_builder &bld, fs_inst *inst,
lower_interpolator_logical_send(const brw_builder &bld, fs_inst *inst,
const struct brw_wm_prog_key *wm_prog_key,
const struct brw_wm_prog_data *wm_prog_data)
{
@ -2122,7 +2122,7 @@ lower_interpolator_logical_send(const fs_builder &bld, fs_inst *inst,
} else if (wm_prog_data->coarse_pixel_dispatch == INTEL_SOMETIMES) {
STATIC_ASSERT(INTEL_MSAA_FLAG_COARSE_PI_MSG == (1 << 15));
brw_reg orig_desc = desc;
const fs_builder &ubld = bld.exec_all().group(8, 0);
const brw_builder &ubld = bld.exec_all().group(8, 0);
desc = ubld.vgrf(BRW_TYPE_UD);
ubld.AND(desc, dynamic_msaa_flags(wm_prog_data),
brw_imm_ud(INTEL_MSAA_FLAG_COARSE_PI_MSG));
@ -2152,7 +2152,7 @@ lower_interpolator_logical_send(const fs_builder &bld, fs_inst *inst,
*/
if (dynamic_mode) {
brw_reg orig_desc = desc;
const fs_builder &ubld = bld.exec_all().group(8, 0);
const brw_builder &ubld = bld.exec_all().group(8, 0);
desc = ubld.vgrf(BRW_TYPE_UD);
/* The predicate should have been built in brw_fs_nir.cpp when emitting
@ -2195,7 +2195,7 @@ lower_interpolator_logical_send(const fs_builder &bld, fs_inst *inst,
}
static void
lower_btd_logical_send(const fs_builder &bld, fs_inst *inst)
lower_btd_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
brw_reg global_addr = inst->src[0];
@ -2203,7 +2203,7 @@ lower_btd_logical_send(const fs_builder &bld, fs_inst *inst)
const unsigned unit = reg_unit(devinfo);
const unsigned mlen = 2 * unit;
const fs_builder ubld = bld.exec_all();
const brw_builder ubld = bld.exec_all();
brw_reg header = ubld.vgrf(BRW_TYPE_UD, 2 * unit);
ubld.MOV(header, brw_imm_ud(0));
@ -2267,7 +2267,7 @@ lower_btd_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_trace_ray_logical_send(const fs_builder &bld, fs_inst *inst)
lower_trace_ray_logical_send(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
/* The emit_uniformize() in brw_fs_nir.cpp will generate a horizontal
@ -2292,7 +2292,7 @@ lower_trace_ray_logical_send(const fs_builder &bld, fs_inst *inst)
const unsigned unit = reg_unit(devinfo);
const unsigned mlen = unit;
const fs_builder ubld = bld.exec_all();
const brw_builder ubld = bld.exec_all();
brw_reg header = ubld.vgrf(BRW_TYPE_UD);
ubld.MOV(header, brw_imm_ud(0));
@ -2364,7 +2364,7 @@ lower_trace_ray_logical_send(const fs_builder &bld, fs_inst *inst)
}
static void
lower_get_buffer_size(const fs_builder &bld, fs_inst *inst)
lower_get_buffer_size(const brw_builder &bld, fs_inst *inst)
{
const intel_device_info *devinfo = bld.shader->devinfo;
/* Since we can only execute this instruction on uniform bti/surface
@ -2404,7 +2404,7 @@ brw_lower_logical_sends(fs_visitor &s)
bool progress = false;
foreach_block_and_inst_safe(block, fs_inst, inst, s.cfg) {
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
switch (inst->opcode) {
case FS_OPCODE_FB_WRITE_LOGICAL:
@ -2543,8 +2543,8 @@ brw_lower_uniform_pull_constant_loads(fs_visitor &s)
assert(size_B.file == IMM);
if (devinfo->has_lsc) {
const fs_builder ubld =
fs_builder(&s, block, inst).group(8, 0).exec_all();
const brw_builder ubld =
brw_builder(&s, block, inst).group(8, 0).exec_all();
const brw_reg payload = ubld.vgrf(BRW_TYPE_UD);
ubld.MOV(payload, offset_B);
@ -2581,8 +2581,8 @@ brw_lower_uniform_pull_constant_loads(fs_visitor &s)
s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
} else {
const fs_builder ubld = fs_builder(&s, block, inst).exec_all();
brw_reg header = fs_builder(&s, 8).exec_all().vgrf(BRW_TYPE_UD);
const brw_builder ubld = brw_builder(&s, block, inst).exec_all();
brw_reg header = brw_builder(&s, 8).exec_all().vgrf(BRW_TYPE_UD);
ubld.group(8, 0).MOV(header,
retype(brw_vec8_grf(0, 0), BRW_TYPE_UD));
@ -2624,7 +2624,7 @@ brw_lower_send_descriptors(fs_visitor &s)
if (inst->opcode != SHADER_OPCODE_SEND)
continue;
const fs_builder ubld = fs_builder(&s, block, inst).exec_all().group(1, 0);
const brw_builder ubld = brw_builder(&s, block, inst).exec_all().group(1, 0);
/* Descriptor */
const unsigned rlen = inst->dst.is_null() ? 0 : inst->size_written / REG_SIZE;


@ -42,7 +42,7 @@ brw_lower_pack(fs_visitor &s)
assert(inst->saturate == false);
brw_reg dst = inst->dst;
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
/* The lowering generates 2 instructions for what was previously 1. This
* can trick the IR to believe we're doing partial writes, but the
* register is actually fully written. Mark it as undef to help the IR


@ -478,7 +478,7 @@ namespace brw {
MIN2(brw_type_size_bytes(inst->src[0].type), brw_type_size_bytes(inst->src[1].type)) >= 4 ||
brw_type_size_bytes(inst->src[i].type) == get_exec_type_size(inst));
const fs_builder ibld(v, block, inst);
const brw_builder ibld(v, block, inst);
const brw_reg tmp = ibld.vgrf(get_exec_type(inst));
lower_instruction(v, block, ibld.MOV(tmp, inst->src[i]));
@ -499,7 +499,7 @@ namespace {
bool
lower_dst_modifiers(fs_visitor *v, bblock_t *block, fs_inst *inst)
{
const fs_builder ibld(v, block, inst);
const brw_builder ibld(v, block, inst);
const brw_reg_type type = get_exec_type(inst);
/* Not strictly necessary, but if possible use a temporary with the same
* channel alignment as the current destination in order to avoid
@ -550,7 +550,7 @@ namespace {
{
assert(inst->components_read(i) == 1);
const intel_device_info *devinfo = v->devinfo;
const fs_builder ibld(v, block, inst);
const brw_builder ibld(v, block, inst);
const unsigned stride = required_src_byte_stride(devinfo, inst, i) /
brw_type_size_bytes(inst->src[i].type);
assert(stride > 0);
@ -618,7 +618,7 @@ namespace {
assert(inst->opcode != BRW_OPCODE_MUL || !inst->dst.is_accumulator() ||
brw_type_is_float(inst->dst.type));
const fs_builder ibld(v, block, inst);
const brw_builder ibld(v, block, inst);
const unsigned stride = required_dst_byte_stride(inst) /
brw_type_size_bytes(inst->dst.type);
assert(stride > 0);
@ -689,7 +689,7 @@ namespace {
const unsigned mask = has_invalid_exec_type(v->devinfo, inst);
const brw_reg_type raw_type = required_exec_type(v->devinfo, inst);
const unsigned n = get_exec_type_size(inst) / brw_type_size_bytes(raw_type);
const fs_builder ibld(v, block, inst);
const brw_builder ibld(v, block, inst);
brw_reg tmp = ibld.vgrf(inst->dst.type, inst->dst.stride);
ibld.UNDEF(tmp);
@ -736,7 +736,7 @@ namespace {
lower_src_conversion(fs_visitor *v, bblock_t *block, fs_inst *inst)
{
const intel_device_info *devinfo = v->devinfo;
const fs_builder ibld = fs_builder(v, block, inst).scalar_group();
const brw_builder ibld = brw_builder(v, block, inst).scalar_group();
/* We only handle scalar conversions from small types for now. */
assert(is_uniform(inst->src[0]));


@ -1304,8 +1304,8 @@ namespace {
/* Emit dependency into the SWSB of an extra SYNC
* instruction.
*/
const fs_builder ibld = fs_builder(shader, block, inst)
.exec_all().group(1, 0);
const brw_builder ibld = brw_builder(shader, block, inst)
.exec_all().group(1, 0);
fs_inst *sync = ibld.SYNC(TGL_SYNC_NOP);
sync->sched.sbid = dep.id;
sync->sched.mode = dep.unordered;
@ -1327,8 +1327,8 @@ namespace {
* scenario with unordered dependencies should have been
* handled above.
*/
const fs_builder ibld = fs_builder(shader, block, inst)
.exec_all().group(1, 0);
const brw_builder ibld = brw_builder(shader, block, inst)
.exec_all().group(1, 0);
fs_inst *sync = ibld.SYNC(TGL_SYNC_NOP);
sync->sched = ordered_dependency_swsb(deps[ip], jps[ip], true);
break;


@ -470,7 +470,7 @@ brw_get_lowered_simd_width(const fs_visitor *shader, const fs_inst *inst)
* of the lowered instruction.
*/
static inline bool
needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
needs_src_copy(const brw_builder &lbld, const fs_inst *inst, unsigned i)
{
/* The indirectly indexed register stays the same even if we split the
* instruction.
@ -492,7 +492,7 @@ needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
* it as result in packed form.
*/
static brw_reg
emit_unzip(const fs_builder &lbld, fs_inst *inst, unsigned i)
emit_unzip(const brw_builder &lbld, fs_inst *inst, unsigned i)
{
assert(lbld.group() >= inst->group);
@ -537,7 +537,7 @@ emit_unzip(const fs_builder &lbld, fs_inst *inst, unsigned i)
* destination region.
*/
static inline bool
needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
needs_dst_copy(const brw_builder &lbld, const fs_inst *inst)
{
if (inst->dst.is_null())
return false;
@ -580,7 +580,7 @@ needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
* zipping up the destination of \p inst will be inserted using \p lbld_after.
*/
static brw_reg
emit_zip(const fs_builder &lbld_before, const fs_builder &lbld_after,
emit_zip(const brw_builder &lbld_before, const brw_builder &lbld_after,
fs_inst *inst)
{
assert(lbld_before.dispatch_width() == lbld_after.dispatch_width());
@ -632,7 +632,7 @@ emit_zip(const fs_builder &lbld_before, const fs_builder &lbld_after,
* have to build a single 32bit value for the SIMD32 message out of 2
* SIMD16 16 bit values.
*/
const fs_builder rbld = lbld_after.exec_all().group(1, 0);
const brw_builder rbld = lbld_after.exec_all().group(1, 0);
brw_reg local_res_reg = component(
retype(offset(tmp, lbld_before, dst_size), BRW_TYPE_UW), 0);
brw_reg final_res_reg =
@ -660,8 +660,8 @@ brw_lower_simd_width(fs_visitor &s)
assert(lower_width < inst->exec_size);
/* Builder matching the original instruction. */
const fs_builder bld = fs_builder(&s).at_end();
const fs_builder ibld =
const brw_builder bld = brw_builder(&s).at_end();
const brw_builder ibld =
bld.at(block, inst).exec_all(inst->force_writemask_all)
.group(inst->exec_size, inst->group / inst->exec_size);
@ -737,7 +737,7 @@ brw_lower_simd_width(fs_visitor &s)
* transform the sources and destination and emit the lowered
* instruction.
*/
const fs_builder lbld = ibld.group(lower_width, i);
const brw_builder lbld = ibld.group(lower_width, i);
for (unsigned j = 0; j < inst->sources; j++)
split_inst.src[j] = emit_unzip(lbld.at(block, inst), inst, j);


@ -122,7 +122,7 @@ brw_get_reduction_info(brw_reduce_op red_op, brw_reg_type type)
}
static void
brw_emit_scan_step(const fs_builder &bld, enum opcode opcode, brw_conditional_mod mod,
brw_emit_scan_step(const brw_builder &bld, enum opcode opcode, brw_conditional_mod mod,
const brw_reg &tmp,
unsigned left_offset, unsigned left_stride,
unsigned right_offset, unsigned right_stride)
@ -187,7 +187,7 @@ brw_emit_scan_step(const fs_builder &bld, enum opcode opcode, brw_conditional_mo
}
static void
brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
brw_emit_scan(const brw_builder &bld, enum opcode opcode, const brw_reg &tmp,
unsigned cluster_size, brw_conditional_mod mod)
{
unsigned dispatch_width = bld.dispatch_width();
@ -198,7 +198,7 @@ brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
*/
if (dispatch_width * brw_type_size_bytes(tmp.type) > 2 * REG_SIZE) {
const unsigned half_width = dispatch_width / 2;
const fs_builder ubld = bld.exec_all().group(half_width, 0);
const brw_builder ubld = bld.exec_all().group(half_width, 0);
brw_reg left = tmp;
brw_reg right = horiz_offset(tmp, half_width);
brw_emit_scan(ubld, opcode, left, cluster_size, mod);
@ -211,13 +211,13 @@ brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
}
if (cluster_size > 1) {
const fs_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
const brw_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
brw_emit_scan_step(ubld, opcode, mod, tmp, 0, 2, 1, 2);
}
if (cluster_size > 2) {
if (brw_type_size_bytes(tmp.type) <= 4) {
const fs_builder ubld =
const brw_builder ubld =
bld.exec_all().group(dispatch_width / 4, 0);
brw_emit_scan_step(ubld, opcode, mod, tmp, 1, 4, 2, 4);
brw_emit_scan_step(ubld, opcode, mod, tmp, 1, 4, 3, 4);
@ -228,7 +228,7 @@ brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
* 8-wide in that case and it's the same number of
* instructions.
*/
const fs_builder ubld = bld.exec_all().group(2, 0);
const brw_builder ubld = bld.exec_all().group(2, 0);
for (unsigned i = 0; i < dispatch_width; i += 4)
brw_emit_scan_step(ubld, opcode, mod, tmp, i + 1, 0, i + 2, 1);
}
@ -237,7 +237,7 @@ brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
for (unsigned i = 4;
i < MIN2(cluster_size, dispatch_width);
i *= 2) {
const fs_builder ubld = bld.exec_all().group(i, 0);
const brw_builder ubld = bld.exec_all().group(i, 0);
brw_emit_scan_step(ubld, opcode, mod, tmp, i - 1, 0, i, 1);
if (dispatch_width > i * 2)
@ -253,7 +253,7 @@ brw_emit_scan(const fs_builder &bld, enum opcode opcode, const brw_reg &tmp,
static bool
brw_lower_reduce(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
assert(inst->dst.type == inst->src[0].type);
brw_reg dst = inst->dst;
@ -305,7 +305,7 @@ brw_lower_reduce(fs_visitor &s, bblock_t *block, fs_inst *inst)
static bool
brw_lower_scan(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
assert(inst->dst.type == inst->src[0].type);
brw_reg dst = inst->dst;
@ -320,7 +320,7 @@ brw_lower_scan(fs_visitor &s, bblock_t *block, fs_inst *inst)
* to reduction operation's identity value.
*/
brw_reg scan = bld.vgrf(src.type);
const fs_builder ubld = bld.exec_all();
const brw_builder ubld = bld.exec_all();
ubld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, info.identity);
if (inst->opcode == SHADER_OPCODE_EXCLUSIVE_SCAN) {
@ -346,9 +346,9 @@ brw_lower_scan(fs_visitor &s, bblock_t *block, fs_inst *inst)
}
static brw_reg
brw_fill_flag(const fs_builder &bld, unsigned v)
brw_fill_flag(const brw_builder &bld, unsigned v)
{
const fs_builder ubld1 = bld.exec_all().group(1, 0);
const brw_builder ubld1 = bld.exec_all().group(1, 0);
brw_reg flag = brw_flag_reg(0, 0);
if (bld.shader->dispatch_width == 32) {
@ -363,7 +363,7 @@ brw_fill_flag(const fs_builder &bld, unsigned v)
}
static void
brw_lower_dispatch_width_vote(const fs_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
brw_lower_dispatch_width_vote(const brw_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
{
const intel_device_info *devinfo = bld.shader->devinfo;
const unsigned dispatch_width = bld.shader->dispatch_width;
@ -393,8 +393,8 @@ brw_lower_dispatch_width_vote(const fs_builder &bld, enum opcode opcode, brw_reg
*
* TODO: Check if we still need this for newer platforms.
*/
const fs_builder ubld = devinfo->ver >= 20 ? bld.exec_all()
: bld.exec_all().group(1, 0);
const brw_builder ubld = devinfo->ver >= 20 ? bld.exec_all()
: bld.exec_all().group(1, 0);
brw_reg res1 = ubld.MOV(brw_imm_d(0));
enum brw_predicate pred;
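
Reduced to scalar arithmetic, the ANY/ALL votes being lowered above are a single comparison against the execution mask (illustrative model, not the pass itself):

   #include <cstdint>

   /* ANY holds when some active lane voted true; ALL when every active
    * lane did. Inactive lanes are masked out on both sides.
    */
   static bool
   vote(uint32_t ballot, uint32_t exec_mask, bool any)
   {
      ballot &= exec_mask;
      return any ? ballot != 0 : ballot == exec_mask;
   }
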
@ -415,7 +415,7 @@ brw_lower_dispatch_width_vote(const fs_builder &bld, enum opcode opcode, brw_reg
}
static void
brw_lower_quad_vote_gfx9(const fs_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
brw_lower_quad_vote_gfx9(const brw_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
{
assert(opcode == SHADER_OPCODE_VOTE_ANY || opcode == SHADER_OPCODE_VOTE_ALL);
const bool any = opcode == SHADER_OPCODE_VOTE_ANY;
@ -437,7 +437,7 @@ brw_lower_quad_vote_gfx9(const fs_builder &bld, enum opcode opcode, brw_reg dst,
}
static void
brw_lower_quad_vote_gfx20(const fs_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
brw_lower_quad_vote_gfx20(const brw_builder &bld, enum opcode opcode, brw_reg dst, brw_reg src)
{
assert(opcode == SHADER_OPCODE_VOTE_ANY || opcode == SHADER_OPCODE_VOTE_ALL);
const bool any = opcode == SHADER_OPCODE_VOTE_ANY;
@ -484,7 +484,7 @@ brw_lower_quad_vote_gfx20(const fs_builder &bld, enum opcode opcode, brw_reg dst
static bool
brw_lower_vote(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
brw_reg dst = inst->dst;
brw_reg src = inst->src[0];
@ -514,12 +514,12 @@ brw_lower_vote(fs_visitor &s, bblock_t *block, fs_inst *inst)
static bool
brw_lower_ballot(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
brw_reg value = retype(inst->src[0], BRW_TYPE_UD);
brw_reg dst = inst->dst;
const fs_builder xbld = dst.is_scalar ? bld.scalar_group() : bld;
const brw_builder xbld = dst.is_scalar ? bld.scalar_group() : bld;
if (value.file == IMM) {
/* Implement a fast-path for ballot(true). */
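
The fast path noted above works because ballot(true) is just the execution mask. A scalar model of the general case (illustrative; the bool-array representation is this sketch's own, with n at most 32):

   #include <cstdint>

   /* Each active lane with a true predicate contributes its bit; a
    * non-zero immediate predicate degenerates to exec_mask itself.
    */
   static uint32_t
   ballot(const bool *pred, uint32_t exec_mask, unsigned n)
   {
      uint32_t b = 0;
      for (unsigned i = 0; i < n; i++)
         if (((exec_mask >> i) & 1) && pred[i])
            b |= 1u << i;
      return b;
   }
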
@ -544,7 +544,7 @@ brw_lower_ballot(fs_visitor &s, bblock_t *block, fs_inst *inst)
static bool
brw_lower_quad_swap(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
assert(inst->dst.type == inst->src[0].type);
brw_reg dst = inst->dst;
@ -557,7 +557,7 @@ brw_lower_quad_swap(fs_visitor &s, bblock_t *block, fs_inst *inst)
case BRW_SWAP_HORIZONTAL: {
const brw_reg tmp = bld.vgrf(value.type);
const fs_builder ubld = bld.exec_all().group(s.dispatch_width / 2, 0);
const brw_builder ubld = bld.exec_all().group(s.dispatch_width / 2, 0);
const brw_reg src_left = horiz_stride(value, 2);
const brw_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
@ -577,7 +577,7 @@ brw_lower_quad_swap(fs_visitor &s, bblock_t *block, fs_inst *inst)
const unsigned swizzle = dir == BRW_SWAP_VERTICAL ? BRW_SWIZZLE4(2,3,0,1)
: BRW_SWIZZLE4(3,2,1,0);
const brw_reg tmp = bld.vgrf(value.type);
const fs_builder ubld = bld.exec_all();
const brw_builder ubld = bld.exec_all();
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value, brw_imm_ud(swizzle));
bld.MOV(dst, tmp);
} else {
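
All three quad swaps handled above are lane-index XORs within each group of four: the strided horizontal case pairs neighbours, BRW_SWIZZLE4(2,3,0,1) is lane ^ 2 and BRW_SWIZZLE4(3,2,1,0) is lane ^ 3. A scalar model (illustrative only):

   /* dir: 1 = horizontal (0<->1, 2<->3), 2 = vertical (0<->2, 1<->3),
    * 3 = diagonal (0<->3, 1<->2); i ^ dir stays inside the quad since
    * dir < 4.
    */
   static void
   quad_swap(float *out, const float *in, unsigned n, unsigned dir)
   {
      for (unsigned i = 0; i < n; i++)
         out[i] = in[i ^ dir];
   }
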
@ -600,7 +600,7 @@ brw_lower_quad_swap(fs_visitor &s, bblock_t *block, fs_inst *inst)
static bool
brw_lower_read_from_live_channel(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
assert(inst->sources == 1);
assert(inst->dst.type == inst->src[0].type);
@ -616,7 +616,7 @@ brw_lower_read_from_live_channel(fs_visitor &s, bblock_t *block, fs_inst *inst)
static bool
brw_lower_read_from_channel(fs_visitor &s, bblock_t *block, fs_inst *inst)
{
const fs_builder bld(&s, block, inst);
const brw_builder bld(&s, block, inst);
assert(inst->sources == 2);
assert(inst->dst.type == inst->src[0].type);


@ -345,7 +345,7 @@ brw_opt_split_sends(fs_visitor &s)
if (end <= mid)
continue;
const fs_builder ibld(&s, block, lp);
const brw_builder ibld(&s, block, lp);
fs_inst *lp1 = ibld.LOAD_PAYLOAD(lp->dst, &lp->src[0], mid, lp->header_size);
fs_inst *lp2 = ibld.LOAD_PAYLOAD(lp->dst, &lp->src[mid], end - mid, 0);


@ -39,7 +39,7 @@ opt_address_reg_load_local(fs_visitor &s, bblock_t *block, const brw::def_analys
src_inst->sources > 2)
continue;
fs_builder ubld = fs_builder(&s).at(block, inst).exec_all().group(1, 0);
brw_builder ubld = brw_builder(&s).at(block, inst).exec_all().group(1, 0);
brw_reg sources[3];
for (unsigned i = 0; i < src_inst->sources; i++) {
sources[i] = inst->src[i].file == VGRF ? component(src_inst->src[i], 0) : src_inst->src[i];
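
The hunk above uses the standard recipe for scalar address-register work: construct a builder, reposition it with at(), then drop execution masking and narrow to one channel. A sketch of the same chain (illustrative, not from this commit; the MOV stands in for the rebuilt load):

   /* Position at 'inst', disable masking, go SIMD1, then emit scalar
    * setup through the resulting builder.
    */
   brw_builder ubld = brw_builder(&s).at(block, inst).exec_all().group(1, 0);
   brw_reg tmp = ubld.vgrf(BRW_TYPE_UD);
   ubld.MOV(tmp, brw_imm_ud(0));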


@ -1606,7 +1606,7 @@ brw_opt_combine_constants(fs_visitor &s)
* both HF slots within a DWord with the constant.
*/
const uint32_t width = 1;
const fs_builder ibld = fs_builder(&s, width).at(insert_block, n).exec_all();
const brw_builder ibld = brw_builder(&s, width).at(insert_block, n).exec_all();
brw_reg reg = brw_vgrf(imm->nr, BRW_TYPE_F);
reg.offset = imm->subreg_offset;
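
The width-1 builder above exists because of the DWord trick in the comment: two half-float slots share one 32-bit write. In plain C++ (illustrative):

   #include <cstdint>

   /* Replicate one HF bit pattern into both halves of a DWord so a single
    * DWord MOV seeds both 16-bit slots with the constant.
    */
   static uint32_t
   replicate_hf(uint16_t hf)
   {
      return (uint32_t)hf << 16 | hf;
   }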


@ -379,8 +379,8 @@ remap_sources(fs_visitor &s, const brw::def_analysis &defs,
*/
fs_inst *def = defs.get(inst->src[i]);
bblock_t *def_block = defs.get_block(inst->src[i]);
const fs_builder dbld =
fs_builder(&s, def_block, def).at(def_block, def->next);
const brw_builder dbld =
brw_builder(&s, def_block, def).at(def_block, def->next);
/* Resolve any deferred block IP changes before inserting */
if (def_block->end_ip_delta)
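
remap_sources() builds from the defining instruction and then steps past it, so new code inherits the def's execution controls and lands right where the value is produced. A sketch of the idiom (illustrative; the trailing copy is hypothetical):

   /* Inherit group/exec flags from 'def', insert after it, then give the
    * value a fresh temporary.
    */
   const brw_builder dbld =
      brw_builder(&s, def_block, def).at(def_block, def->next);
   brw_reg copy = dbld.MOV(def->dst);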


@ -152,9 +152,9 @@ brw_opt_combine_convergent_txf(fs_visitor &s)
for (unsigned curr = 0; curr < count; curr += max_simd) {
const unsigned lanes = CLAMP(count - curr, min_simd, max_simd);
const unsigned width = util_next_power_of_two(lanes);
const fs_builder ubld =
fs_builder(&s).at(block, txfs[curr]).exec_all().group(width, 0);
const fs_builder ubld1 = ubld.group(1, 0);
const brw_builder ubld =
brw_builder(&s).at(block, txfs[curr]).exec_all().group(width, 0);
const brw_builder ubld1 = ubld.group(1, 0);
enum brw_reg_type coord_type =
txfs[curr]->src[TEX_LOGICAL_SRC_COORDINATE].type;
@ -211,7 +211,7 @@ brw_opt_combine_convergent_txf(fs_visitor &s)
if (!txf)
break;
const fs_builder ibld = fs_builder(&s, block, txf);
const brw_builder ibld = brw_builder(&s, block, txf);
/* Replace each of the original TXFs with MOVs from our new one */
const unsigned dest_comps = dest_comps_for_txf(s, txf);
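
The convergent-TXF pass batches leftover fetches into legal SIMD groups: clamp the remaining count into [min_simd, max_simd], then round up to a power of two. A standalone model of that width choice (illustrative; util_next_power_of_two() is open-coded here):

   /* Pick the execution width for the next combined TXF. */
   static unsigned
   batch_width(unsigned remaining, unsigned min_simd, unsigned max_simd)
   {
      unsigned lanes = remaining < min_simd ? min_simd :
                       remaining > max_simd ? max_simd : remaining;
      unsigned w = 1;
      while (w < lanes)
         w <<= 1;
      return w;
   }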


@ -145,7 +145,7 @@ brw_opt_split_virtual_grfs(fs_visitor &s)
if (inst->opcode == SHADER_OPCODE_UNDEF) {
assert(inst->dst.file == VGRF);
if (vgrf_has_split[inst->dst.nr]) {
const fs_builder ibld(&s, block, inst);
const brw_builder ibld(&s, block, inst);
assert(inst->size_written % REG_SIZE == 0);
unsigned reg_offset = inst->dst.offset / REG_SIZE;
unsigned size_written = 0;


@ -30,8 +30,8 @@ brw_workaround_emit_dummy_mov_instruction(fs_visitor &s)
return false;
/* Insert dummy mov as first instruction. */
const fs_builder ubld =
fs_builder(&s, s.cfg->first_block(), (fs_inst *)first_inst).exec_all().group(8, 0);
const brw_builder ubld =
brw_builder(&s, s.cfg->first_block(), (fs_inst *)first_inst).exec_all().group(8, 0);
ubld.MOV(ubld.null_reg_ud(), brw_imm_ud(0u));
s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
@ -100,8 +100,8 @@ brw_workaround_memory_fence_before_eot(fs_visitor &s)
if (!has_ugm_write_or_atomic)
break;
const fs_builder ibld(&s, block, inst);
const fs_builder ubld = ibld.exec_all().group(1, 0);
const brw_builder ibld(&s, block, inst);
const brw_builder ubld = ibld.exec_all().group(1, 0);
brw_reg dst = ubld.vgrf(BRW_TYPE_UD);
fs_inst *dummy_fence = ubld.emit(SHADER_OPCODE_MEMORY_FENCE,
@ -228,7 +228,7 @@ brw_workaround_nomask_control_flow(fs_visitor &s)
* instruction), in order to avoid getting a right-shifted
* value.
*/
const fs_builder ubld = fs_builder(&s, block, inst)
const brw_builder ubld = brw_builder(&s, block, inst)
.exec_all().group(s.dispatch_width, 0);
const brw_reg flag = retype(brw_flag_reg(0, 0),
BRW_TYPE_UD);
@ -343,8 +343,8 @@ brw_workaround_source_arf_before_eot(fs_visitor &s)
*/
assert(++eot_count == 1);
const fs_builder ibld(&s, block, inst);
const fs_builder ubld = ibld.exec_all().group(1, 0);
const brw_builder ibld(&s, block, inst);
const brw_builder ubld = ibld.exec_all().group(1, 0);
if (flags_unread & 0x0f)
ubld.MOV(ubld.null_reg_ud(), retype(brw_flag_reg(0, 0), BRW_TYPE_UD));
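
The workaround above retires an outstanding f0 dependency with a MOV to the null register, which consumes the flag without side effects. Covering the second flag register would look the same (illustrative sketch; whether f1 needs this here is an assumption of the sketch, not a statement about the pass):

   /* A null-destination MOV only reads the flag, satisfying the
    * scoreboard before the EOT message.
    */
   if (flags_unread & 0xf0)
      ubld.MOV(ubld.null_reg_ud(), retype(brw_flag_reg(1, 0), BRW_TYPE_UD));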


@ -40,7 +40,7 @@ protected:
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
fs_builder bld;
brw_builder bld;
};
scoreboard_test::scoreboard_test()
@ -65,7 +65,7 @@ scoreboard_test::scoreboard_test()
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader, 8,
false, false);
bld = fs_builder(v).at_end();
bld = brw_builder(v).at_end();
}
scoreboard_test::~scoreboard_test()
@ -106,7 +106,7 @@ lower_scoreboard(fs_visitor *v)
}
fs_inst *
emit_SEND(const fs_builder &bld, const brw_reg &dst,
emit_SEND(const brw_builder &bld, const brw_reg &dst,
const brw_reg &desc, const brw_reg &payload)
{
fs_inst *inst = bld.emit(SHADER_OPCODE_SEND, dst, desc, desc, payload);
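
With the fixture above, a test builds IR through bld, runs the pass, and checks the result. A hypothetical body using the emit_SEND() helper (illustrative, not part of this commit):

   TEST_F(scoreboard_test, trivial_send)
   {
      brw_reg dst = bld.vgrf(BRW_TYPE_UD);
      brw_reg desc = bld.vgrf(BRW_TYPE_UD);
      brw_reg payload = bld.vgrf(BRW_TYPE_UD);
      emit_SEND(bld, dst, desc, payload);
      lower_scoreboard(v);
   }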


@ -40,7 +40,7 @@ protected:
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
fs_builder bld;
brw_builder bld;
void test_mov_prop(enum brw_conditional_mod cmod,
enum brw_reg_type add_type,
@ -72,7 +72,7 @@ cmod_propagation_test::cmod_propagation_test()
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
8, false, false);
bld = fs_builder(v).at_end();
bld = brw_builder(v).at_end();
devinfo->ver = 9;
devinfo->verx10 = devinfo->ver * 10;


@ -67,15 +67,15 @@ struct FSCombineConstantsTest : public ::testing::Test {
}
};
static fs_builder
static brw_builder
make_builder(fs_visitor *s)
{
return fs_builder(s, s->dispatch_width).at_end();
return brw_builder(s, s->dispatch_width).at_end();
}
TEST_F(FSCombineConstantsTest, Simple)
{
fs_builder bld = make_builder(shader);
brw_builder bld = make_builder(shader);
brw_reg r = brw_vec8_grf(1, 0);
brw_reg imm_a = brw_imm_ud(1);
@ -100,7 +100,7 @@ TEST_F(FSCombineConstantsTest, Simple)
TEST_F(FSCombineConstantsTest, DoContainingDo)
{
fs_builder bld = make_builder(shader);
brw_builder bld = make_builder(shader);
brw_reg r1 = brw_vec8_grf(1, 0);
brw_reg r2 = brw_vec8_grf(2, 0);


@ -40,7 +40,7 @@ protected:
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
fs_builder bld;
brw_builder bld;
};
copy_propagation_test::copy_propagation_test()
@ -61,7 +61,7 @@ copy_propagation_test::copy_propagation_test()
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
8, false, false);
bld = fs_builder(v).at_end();
bld = brw_builder(v).at_end();
devinfo->ver = 9;
devinfo->verx10 = devinfo->ver * 10;


@ -22,7 +22,7 @@ protected:
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
fs_builder bld;
brw_builder bld;
};
cse_test::cse_test()
@ -43,7 +43,7 @@ cse_test::cse_test()
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
16, false, false);
bld = fs_builder(v).at_end();
bld = brw_builder(v).at_end();
devinfo->verx10 = 125;
devinfo->ver = devinfo->verx10 / 10;


@ -40,7 +40,7 @@ protected:
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
fs_builder bld;
brw_builder bld;
};
saturate_propagation_test::saturate_propagation_test()
@ -61,7 +61,7 @@ saturate_propagation_test::saturate_propagation_test()
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
16, false, false);
bld = fs_builder(v).at_end();
bld = brw_builder(v).at_end();
devinfo->ver = 9;
devinfo->verx10 = devinfo->ver * 10;