intel/brw: Rename fs_visitor to brw_shader

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32536>
commit cf3bb77224 (parent 352a63122f)
Caio Oliveira, 2024-12-07 10:25:45 -08:00, committed by Marge Bot
58 changed files with 400 additions and 412 deletions

View file

@ -15,7 +15,7 @@
* (less than 1000 nodes) that this algorithm is significantly faster than
* others like Lengauer-Tarjan.
*/
brw_idom_tree::brw_idom_tree(const fs_visitor *s) :
brw_idom_tree::brw_idom_tree(const brw_shader *s) :
num_parents(s->cfg->num_blocks),
parents(new bblock_t *[num_parents]())
{
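For context, the algorithm that comment refers to is the iterative scheme of Cooper, Harvey and Kennedy; a minimal sketch of its core "intersect" step, assuming blocks are numbered in reverse post-order (illustrative names, not Mesa's actual code):

/* Walk two immediate-dominator "fingers" up the tree until they meet.
 * With reverse post-order numbering a larger index is farther from the
 * entry block, so the deeper finger is the one that advances.
 */
static int
intersect(const int *idom, int a, int b)
{
   while (a != b) {
      while (a > b)
         a = idom[a];
      while (b > a)
         b = idom[b];
   }
   return a;
}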
@ -77,7 +77,7 @@ brw_idom_tree::dump(FILE *file) const
fprintf(file, "}\n");
}
brw_register_pressure::brw_register_pressure(const fs_visitor *v)
brw_register_pressure::brw_register_pressure(const brw_shader *v)
{
const brw_live_variables &live = v->live_analysis.require();
const unsigned num_instructions = v->cfg->num_blocks ?

View file

@ -9,7 +9,7 @@
#include "brw_inst.h"
#include "util/bitset.h"
struct fs_visitor;
struct brw_shader;
/**
* Bitset of state categories that can influence the result of IR analysis
@ -177,11 +177,11 @@ private:
* Immediate dominator tree analysis of a shader.
*/
struct brw_idom_tree {
brw_idom_tree(const fs_visitor *s);
brw_idom_tree(const brw_shader *s);
~brw_idom_tree();
bool
validate(const fs_visitor *) const
validate(const brw_shader *) const
{
/* FINISHME */
return true;
@ -237,7 +237,7 @@ private:
* are live at any point of the program in GRF units.
*/
struct brw_register_pressure {
brw_register_pressure(const fs_visitor *v);
brw_register_pressure(const brw_shader *v);
~brw_register_pressure();
brw_analysis_dependency_class
@ -249,7 +249,7 @@ struct brw_register_pressure {
}
bool
validate(const fs_visitor *) const
validate(const brw_shader *) const
{
/* FINISHME */
return true;
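To make "register pressure in GRF units" concrete: the pressure at an instruction is the sum of the allocation sizes of every VGRF live across it. A sketch, under the assumption that the live-range arrays are named vgrf_start/vgrf_end as in the surrounding headers:

static unsigned
pressure_at(const brw_shader *v, int ip)
{
   const brw_live_variables &live = v->live_analysis.require();
   unsigned grfs = 0;
   for (unsigned r = 0; r < v->alloc.count; r++) {
      /* Count r if its live range [start, end] covers ip. */
      if (live.vgrf_start[r] <= ip && ip <= live.vgrf_end[r])
         grfs += v->alloc.sizes[r];
   }
   return grfs;
}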
@ -260,7 +260,7 @@ struct brw_register_pressure {
class brw_def_analysis {
public:
brw_def_analysis(const fs_visitor *v);
brw_def_analysis(const brw_shader *v);
~brw_def_analysis();
brw_inst *
@ -287,7 +287,7 @@ public:
unsigned count() const { return def_count; }
unsigned ssa_count() const;
void print_stats(const fs_visitor *) const;
void print_stats(const brw_shader *) const;
brw_analysis_dependency_class
dependency_class() const
@ -298,13 +298,13 @@ public:
BRW_DEPENDENCY_BLOCKS;
}
bool validate(const fs_visitor *) const;
bool validate(const brw_shader *) const;
private:
void mark_invalid(int);
bool fully_defines(const fs_visitor *v, brw_inst *);
bool fully_defines(const brw_shader *v, brw_inst *);
void update_for_reads(const brw_idom_tree &idom, bblock_t *block, brw_inst *);
void update_for_write(const fs_visitor *v, bblock_t *block, brw_inst *);
void update_for_write(const brw_shader *v, bblock_t *block, brw_inst *);
brw_inst **def_insts;
bblock_t **def_blocks;
@ -352,10 +352,10 @@ public:
BITSET_WORD flag_liveout[1];
};
brw_live_variables(const fs_visitor *s);
brw_live_variables(const brw_shader *s);
~brw_live_variables();
bool validate(const fs_visitor *s) const;
bool validate(const brw_shader *s) const;
brw_analysis_dependency_class
dependency_class() const
@ -423,7 +423,7 @@ protected:
* analysis.
*/
struct brw_performance {
brw_performance(const fs_visitor *v);
brw_performance(const brw_shader *v);
~brw_performance();
brw_analysis_dependency_class
@ -434,7 +434,7 @@ struct brw_performance {
}
bool
validate(const fs_visitor *) const
validate(const brw_shader *) const
{
return true;
}

View file

@ -94,14 +94,14 @@ brw_def_analysis::update_for_reads(const brw_idom_tree &idom,
}
bool
brw_def_analysis::fully_defines(const fs_visitor *v, brw_inst *inst)
brw_def_analysis::fully_defines(const brw_shader *v, brw_inst *inst)
{
return v->alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
!inst->is_partial_write();
}
void
brw_def_analysis::update_for_write(const fs_visitor *v,
brw_def_analysis::update_for_write(const brw_shader *v,
bblock_t *block,
brw_inst *inst)
{
@ -124,7 +124,7 @@ brw_def_analysis::update_for_write(const fs_visitor *v,
}
}
brw_def_analysis::brw_def_analysis(const fs_visitor *v)
brw_def_analysis::brw_def_analysis(const brw_shader *v)
{
const brw_idom_tree &idom = v->idom_analysis.require();
@ -182,7 +182,7 @@ brw_def_analysis::~brw_def_analysis()
}
bool
brw_def_analysis::validate(const fs_visitor *v) const
brw_def_analysis::validate(const brw_shader *v) const
{
for (unsigned i = 0; i < def_count; i++) {
assert(!def_insts[i] == !def_blocks[i]);
@ -205,7 +205,7 @@ brw_def_analysis::ssa_count() const
}
void
brw_def_analysis::print_stats(const fs_visitor *v) const
brw_def_analysis::print_stats(const brw_shader *v) const
{
const unsigned defs = ssa_count();

View file

@ -244,7 +244,7 @@ brw_live_variables::compute_start_end()
}
}
brw_live_variables::brw_live_variables(const fs_visitor *s)
brw_live_variables::brw_live_variables(const brw_shader *s)
: devinfo(s->devinfo), cfg(s->cfg)
{
mem_ctx = ralloc_context(NULL);
@ -332,7 +332,7 @@ check_register_live_range(const brw_live_variables *live, int ip,
}
bool
brw_live_variables::validate(const fs_visitor *s) const
brw_live_variables::validate(const brw_shader *s) const
{
int ip = 0;

View file

@ -1003,7 +1003,7 @@ namespace {
* Estimate the performance of the specified shader.
*/
void
calculate_performance(brw_performance &p, const fs_visitor *s,
calculate_performance(brw_performance &p, const brw_shader *s,
unsigned dispatch_width)
{
/* XXX - Note that the previous version of this code used worst-case
@ -1069,7 +1069,7 @@ namespace {
}
}
brw_performance::brw_performance(const fs_visitor *v) :
brw_performance::brw_performance(const brw_shader *v) :
block_latency(new unsigned[v->cfg->num_blocks])
{
calculate_performance(*this, v, v->dispatch_width);

View file

@ -133,7 +133,7 @@ brw_builder::shuffle_from_32bit_read(const brw_reg &dst,
brw_reg
brw_sample_mask_reg(const brw_builder &bld)
{
const fs_visitor &s = *bld.shader;
const brw_shader &s = *bld.shader;
if (s.stage != MESA_SHADER_FRAGMENT) {
return brw_imm_ud(0xffffffff);
@ -158,7 +158,7 @@ brw_emit_predicate_on_sample_mask(const brw_builder &bld, brw_inst *inst)
bld.group() == inst->group &&
bld.dispatch_width() == inst->exec_size);
const fs_visitor &s = *bld.shader;
const brw_shader &s = *bld.shader;
const brw_reg sample_mask = brw_sample_mask_reg(bld);
const unsigned subreg = sample_mask_flag_subreg(s);

View file

@ -40,7 +40,7 @@ public:
* Construct a brw_builder that inserts instructions into \p shader.
* \p dispatch_width gives the native execution width of the program.
*/
brw_builder(fs_visitor *shader,
brw_builder(brw_shader *shader,
unsigned dispatch_width) :
shader(shader), block(NULL), cursor(NULL),
_dispatch_width(dispatch_width),
@ -50,7 +50,7 @@ public:
{
}
explicit brw_builder(fs_visitor *s) : brw_builder(s, s->dispatch_width) {}
explicit brw_builder(brw_shader *s) : brw_builder(s, s->dispatch_width) {}
/**
* Construct a brw_builder that inserts instructions into \p shader
@ -58,7 +58,7 @@ public:
* execution controls and debug annotation are initialized from the
* instruction passed as argument.
*/
brw_builder(fs_visitor *shader, bblock_t *block, brw_inst *inst) :
brw_builder(brw_shader *shader, bblock_t *block, brw_inst *inst) :
shader(shader), block(block), cursor(inst),
_dispatch_width(inst->exec_size),
_group(inst->group),
@ -844,7 +844,7 @@ public:
return component(dst, 0);
}
fs_visitor *shader;
brw_shader *shader;
brw_inst *BREAK() { return emit(BRW_OPCODE_BREAK); }
brw_inst *DO() { return emit(BRW_OPCODE_DO); }
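A minimal usage sketch of the renamed type together with the builder, assembled from calls that appear elsewhere in this diff (illustrative, not code from the commit):

static void
emit_example(brw_shader *s)
{
   /* Insert at the end of the program, as the run_*() helpers below do. */
   const brw_builder bld = brw_builder(s).at_end();
   brw_reg tmp = bld.vgrf(BRW_TYPE_UD);
   bld.MOV(tmp, brw_imm_ud(0));
}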

View file

@ -155,7 +155,7 @@ bblock_t::combine_with(bblock_t *that)
void
bblock_t::dump(FILE *file) const
{
const fs_visitor *s = this->cfg->s;
const brw_shader *s = this->cfg->s;
int ip = this->start_ip;
foreach_inst_in_block(brw_inst, inst, this) {
@ -187,7 +187,7 @@ bblock_t::unlink_list(exec_list *list)
}
}
cfg_t::cfg_t(const fs_visitor *s, exec_list *instructions) :
cfg_t::cfg_t(const brw_shader *s, exec_list *instructions) :
s(s)
{
mem_ctx = ralloc_context(NULL);
@ -663,7 +663,7 @@ cfg_t::dump_cfg()
}
void
brw_calculate_cfg(fs_visitor &s)
brw_calculate_cfg(brw_shader &s)
{
if (s.cfg)
return;

View file

@ -71,7 +71,7 @@ struct bblock_link {
enum bblock_link_kind kind;
};
struct fs_visitor;
struct brw_shader;
struct cfg_t;
struct bblock_t {
@ -318,7 +318,7 @@ bblock_t::last_non_control_flow_inst()
struct cfg_t {
DECLARE_RALLOC_CXX_OPERATORS(cfg_t)
cfg_t(const fs_visitor *s, exec_list *instructions);
cfg_t(const brw_shader *s, exec_list *instructions);
~cfg_t();
void remove_block(bblock_t *block);
@ -346,7 +346,7 @@ struct cfg_t {
*/
inline void adjust_block_ips();
const struct fs_visitor *s;
const struct brw_shader *s;
void *mem_ctx;
/** Ordered list (by ip) of basic blocks */
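The back-pointer above is what ties block traversal to the shader; a typical walk over the CFG looks like this (a sketch; foreach_block is assumed from brw_cfg.h):

static unsigned
count_instructions(const brw_shader *s)
{
   unsigned n = 0;
   foreach_block(block, s->cfg) {
      foreach_inst_in_block(brw_inst, inst, block)
         n++;
   }
   return n;
}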

View file

@ -32,7 +32,7 @@ brw_bsr(const struct intel_device_info *devinfo,
}
static bool
run_bs(fs_visitor &s, bool allow_spilling)
run_bs(brw_shader &s, bool allow_spilling)
{
assert(s.stage >= MESA_SHADER_RAYGEN && s.stage <= MESA_SHADER_CALLABLE);
@ -95,7 +95,7 @@ compile_single_bs(const struct brw_compiler *compiler,
.required_width = compiler->devinfo->ver >= 20 ? 16u : 8u,
};
std::unique_ptr<fs_visitor> v[2];
std::unique_ptr<brw_shader> v[2];
for (unsigned simd = 0; simd < ARRAY_SIZE(v); simd++) {
if (!brw_simd_should_compile(simd_state, simd))
@ -106,7 +106,7 @@ compile_single_bs(const struct brw_compiler *compiler,
if (dispatch_width == 8 && compiler->devinfo->ver >= 20)
continue;
v[simd] = std::make_unique<fs_visitor>(compiler, &params->base,
v[simd] = std::make_unique<brw_shader>(compiler, &params->base,
&key->base,
&prog_data->base, shader,
dispatch_width,
@ -138,7 +138,7 @@ compile_single_bs(const struct brw_compiler *compiler,
}
assert(selected_simd < int(ARRAY_SIZE(v)));
fs_visitor *selected = v[selected_simd].get();
brw_shader *selected = v[selected_simd].get();
assert(selected);
const unsigned dispatch_width = selected->dispatch_width;

View file

@ -59,7 +59,7 @@ cs_fill_push_const_info(const struct intel_device_info *devinfo,
}
static bool
run_cs(fs_visitor &s, bool allow_spilling)
run_cs(brw_shader &s, bool allow_spilling)
{
assert(gl_shader_stage_is_compute(s.stage));
const brw_builder bld = brw_builder(&s).at_end();
@ -164,7 +164,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
prog_data->uses_sampler = brw_nir_uses_sampler(params->base.nir);
std::unique_ptr<fs_visitor> v[3];
std::unique_ptr<brw_shader> v[3];
for (unsigned i = 0; i < 3; i++) {
const unsigned simd = devinfo->ver >= 30 ? 2 - i : i;
@ -187,7 +187,7 @@ brw_compile_cs(const struct brw_compiler *compiler,
brw_postprocess_nir(shader, compiler, debug_enabled,
key->base.robust_flags);
v[simd] = std::make_unique<fs_visitor>(compiler, &params->base,
v[simd] = std::make_unique<brw_shader>(compiler, &params->base,
&key->base,
&prog_data->base,
shader, dispatch_width,

View file

@ -19,7 +19,7 @@
#include <memory>
static brw_inst *
brw_emit_single_fb_write(fs_visitor &s, const brw_builder &bld,
brw_emit_single_fb_write(brw_shader &s, const brw_builder &bld,
brw_reg color0, brw_reg color1,
brw_reg src0_alpha,
unsigned target, unsigned components,
@ -56,7 +56,7 @@ brw_emit_single_fb_write(fs_visitor &s, const brw_builder &bld,
}
static void
brw_do_emit_fb_writes(fs_visitor &s, int nr_color_regions, bool replicate_alpha)
brw_do_emit_fb_writes(brw_shader &s, int nr_color_regions, bool replicate_alpha)
{
const brw_builder bld = brw_builder(&s).at_end();
brw_inst *inst = NULL;
@ -111,7 +111,7 @@ brw_do_emit_fb_writes(fs_visitor &s, int nr_color_regions, bool replicate_alpha)
}
static void
brw_emit_fb_writes(fs_visitor &s)
brw_emit_fb_writes(brw_shader &s)
{
const struct intel_device_info *devinfo = s.devinfo;
assert(s.stage == MESA_SHADER_FRAGMENT);
@ -176,7 +176,7 @@ brw_emit_fb_writes(fs_visitor &s)
/** Emits the interpolation for the varying inputs. */
static void
brw_emit_interpolation_setup(fs_visitor &s)
brw_emit_interpolation_setup(brw_shader &s)
{
const struct intel_device_info *devinfo = s.devinfo;
const brw_builder bld = brw_builder(&s).at_end();
@ -600,7 +600,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
* instructions to FS_OPCODE_REP_FB_WRITE.
*/
static void
brw_emit_repclear_shader(fs_visitor &s)
brw_emit_repclear_shader(brw_shader &s)
{
brw_wm_prog_key *key = (brw_wm_prog_key*) s.key;
brw_inst *write = NULL;
@ -1266,7 +1266,7 @@ gfx9_ps_header_only_workaround(struct brw_wm_prog_data *wm_prog_data)
}
static void
brw_assign_urb_setup(fs_visitor &s)
brw_assign_urb_setup(brw_shader &s)
{
assert(s.stage == MESA_SHADER_FRAGMENT);
@ -1449,7 +1449,7 @@ brw_assign_urb_setup(fs_visitor &s)
}
static bool
run_fs(fs_visitor &s, bool allow_spilling, bool do_rep_send)
run_fs(brw_shader &s, bool allow_spilling, bool do_rep_send)
{
const struct intel_device_info *devinfo = s.devinfo;
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
@ -1580,14 +1580,14 @@ brw_compile_fs(const struct brw_compiler *compiler,
assert(reqd_dispatch_width == SUBGROUP_SIZE_VARYING ||
reqd_dispatch_width == SUBGROUP_SIZE_REQUIRE_16);
std::unique_ptr<fs_visitor> v8, v16, v32, vmulti;
std::unique_ptr<brw_shader> v8, v16, v32, vmulti;
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL,
*multi_cfg = NULL;
float throughput = 0;
bool has_spilled = false;
if (devinfo->ver < 20) {
v8 = std::make_unique<fs_visitor>(compiler, &params->base, key,
v8 = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 8, 1,
params->base.stats != NULL,
debug_enabled);
@ -1621,14 +1621,14 @@ brw_compile_fs(const struct brw_compiler *compiler,
if (devinfo->ver >= 30) {
unsigned max_dispatch_width = reqd_dispatch_width ? reqd_dispatch_width : 32;
fs_visitor *vbase = NULL;
brw_shader *vbase = NULL;
if (params->max_polygons >= 2 && !key->coarse_pixel) {
if (params->max_polygons >= 4 && max_dispatch_width >= 32 &&
4 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 4X8)) {
/* Try a quad-SIMD8 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 4,
params->base.stats != NULL,
debug_enabled);
@ -1649,7 +1649,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
2 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 2X16)) {
/* Try a dual-SIMD16 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 2,
params->base.stats != NULL,
debug_enabled);
@ -1670,7 +1670,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
2 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 2X8)) {
/* Try a dual-SIMD8 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 16, 2,
params->base.stats != NULL,
debug_enabled);
@ -1692,7 +1692,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
INTEL_SIMD(FS, 32) &&
!prog_data->base.ray_queries) {
/* Try a SIMD32 compile */
v32 = std::make_unique<fs_visitor>(compiler, &params->base, key,
v32 = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 1,
params->base.stats != NULL,
debug_enabled);
@ -1717,7 +1717,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
if (!vbase && INTEL_SIMD(FS, 16)) {
/* Try a SIMD16 compile */
v16 = std::make_unique<fs_visitor>(compiler, &params->base, key,
v16 = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 16, 1,
params->base.stats != NULL,
debug_enabled);
@ -1741,7 +1741,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
INTEL_SIMD(FS, 16)) ||
reqd_dispatch_width == SUBGROUP_SIZE_REQUIRE_16) {
/* Try a SIMD16 compile */
v16 = std::make_unique<fs_visitor>(compiler, &params->base, key,
v16 = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 16, 1,
params->base.stats != NULL,
debug_enabled);
@ -1775,7 +1775,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
reqd_dispatch_width == SUBGROUP_SIZE_VARYING &&
!simd16_failed && INTEL_SIMD(FS, 32)) {
/* Try a SIMD32 compile */
v32 = std::make_unique<fs_visitor>(compiler, &params->base, key,
v32 = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 1,
params->base.stats != NULL,
debug_enabled);
@ -1810,7 +1810,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
if (devinfo->ver >= 12 && !has_spilled &&
params->max_polygons >= 2 && !key->coarse_pixel &&
reqd_dispatch_width == SUBGROUP_SIZE_VARYING) {
fs_visitor *vbase = v8 ? v8.get() : v16 ? v16.get() : v32.get();
brw_shader *vbase = v8 ? v8.get() : v16 ? v16.get() : v32.get();
assert(vbase);
if (devinfo->ver >= 20 &&
@ -1819,7 +1819,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
4 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 4X8)) {
/* Try a quad-SIMD8 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 4,
params->base.stats != NULL,
debug_enabled);
@ -1839,7 +1839,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
2 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 2X16)) {
/* Try a dual-SIMD16 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 32, 2,
params->base.stats != NULL,
debug_enabled);
@ -1858,7 +1858,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
2 * prog_data->num_varying_inputs <= MAX_VARYING &&
INTEL_SIMD(FS, 2X8)) {
/* Try a dual-SIMD8 compile */
vmulti = std::make_unique<fs_visitor>(compiler, &params->base, key,
vmulti = std::make_unique<brw_shader>(compiler, &params->base, key,
prog_data, nir, 16, 2,
params->base.stats != NULL,
debug_enabled);

View file

@ -30,7 +30,7 @@ static const GLuint gl_prim_to_hw_prim[MESA_PRIM_TRIANGLE_STRIP_ADJACENCY+1] = {
};
static void
brw_emit_gs_thread_end(fs_visitor &s)
brw_emit_gs_thread_end(brw_shader &s)
{
assert(s.stage == MESA_SHADER_GEOMETRY);
@ -68,7 +68,7 @@ brw_emit_gs_thread_end(fs_visitor &s)
}
static void
brw_assign_gs_urb_setup(fs_visitor &s)
brw_assign_gs_urb_setup(brw_shader &s)
{
assert(s.stage == MESA_SHADER_GEOMETRY);
@ -84,7 +84,7 @@ brw_assign_gs_urb_setup(fs_visitor &s)
}
static bool
run_gs(fs_visitor &s)
run_gs(brw_shader &s)
{
assert(s.stage == MESA_SHADER_GEOMETRY);
@ -349,7 +349,7 @@ brw_compile_gs(const struct brw_compiler *compiler,
brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_GEOMETRY);
}
fs_visitor v(compiler, &params->base, &key->base, &prog_data->base.base,
brw_shader v(compiler, &params->base, &key->base, &prog_data->base.base,
nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
v.gs.control_data_bits_per_vertex = control_data_bits_per_vertex;

View file

@ -287,7 +287,7 @@ brw_nir_lower_mesh_primitive_count(nir_shader *nir)
}
static void
brw_emit_urb_fence(fs_visitor &s)
brw_emit_urb_fence(brw_shader &s)
{
const brw_builder bld1 = brw_builder(&s).at_end().exec_all().group(1, 0);
brw_reg dst = bld1.vgrf(BRW_TYPE_UD);
@ -314,7 +314,7 @@ brw_emit_urb_fence(fs_visitor &s)
}
static bool
run_task_mesh(fs_visitor &s, bool allow_spilling)
run_task_mesh(brw_shader &s, bool allow_spilling)
{
assert(s.stage == MESA_SHADER_TASK ||
s.stage == MESA_SHADER_MESH);
@ -393,7 +393,7 @@ brw_compile_task(const struct brw_compiler *compiler,
.required_width = brw_required_dispatch_width(&nir->info),
};
std::unique_ptr<fs_visitor> v[3];
std::unique_ptr<brw_shader> v[3];
for (unsigned i = 0; i < 3; i++) {
const unsigned simd = devinfo->ver >= 30 ? 2 - i : i;
@ -411,7 +411,7 @@ brw_compile_task(const struct brw_compiler *compiler,
brw_postprocess_nir(shader, compiler, debug_enabled,
key->base.robust_flags);
v[simd] = std::make_unique<fs_visitor>(compiler, &params->base,
v[simd] = std::make_unique<brw_shader>(compiler, &params->base,
&key->base,
&prog_data->base.base,
shader, dispatch_width,
@ -446,7 +446,7 @@ brw_compile_task(const struct brw_compiler *compiler,
return NULL;
}
fs_visitor *selected = v[selected_simd].get();
brw_shader *selected = v[selected_simd].get();
prog_data->base.prog_mask = 1 << selected_simd;
prog_data->base.base.grf_used = MAX2(prog_data->base.base.grf_used,
selected->grf_used);
@ -1704,7 +1704,7 @@ brw_compile_mesh(const struct brw_compiler *compiler,
.required_width = brw_required_dispatch_width(&nir->info),
};
std::unique_ptr<fs_visitor> v[3];
std::unique_ptr<brw_shader> v[3];
for (unsigned i = 0; i < 3; i++) {
const unsigned simd = devinfo->ver >= 30 ? 2 - i : i;
@ -1734,7 +1734,7 @@ brw_compile_mesh(const struct brw_compiler *compiler,
brw_postprocess_nir(shader, compiler, debug_enabled,
key->base.robust_flags);
v[simd] = std::make_unique<fs_visitor>(compiler, &params->base,
v[simd] = std::make_unique<brw_shader>(compiler, &params->base,
&key->base,
&prog_data->base.base,
shader, dispatch_width,
@ -1769,7 +1769,7 @@ brw_compile_mesh(const struct brw_compiler *compiler,
return NULL;
}
fs_visitor *selected = v[selected_simd].get();
brw_shader *selected = v[selected_simd].get();
prog_data->base.prog_mask = 1 << selected_simd;
prog_data->base.base.grf_used = MAX2(prog_data->base.base.grf_used,
selected->grf_used);

View file

@ -42,7 +42,7 @@ get_patch_count_threshold(int input_control_points)
}
static void
brw_set_tcs_invocation_id(fs_visitor &s)
brw_set_tcs_invocation_id(brw_shader &s)
{
const struct intel_device_info *devinfo = s.devinfo;
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
@ -88,7 +88,7 @@ brw_set_tcs_invocation_id(fs_visitor &s)
}
static void
brw_emit_tcs_thread_end(fs_visitor &s)
brw_emit_tcs_thread_end(brw_shader &s)
{
/* Try and tag the last URB write with EOT instead of emitting a whole
* separate write just to finish the thread. There isn't guaranteed to
@ -115,7 +115,7 @@ brw_emit_tcs_thread_end(fs_visitor &s)
}
static void
brw_assign_tcs_urb_setup(fs_visitor &s)
brw_assign_tcs_urb_setup(brw_shader &s)
{
assert(s.stage == MESA_SHADER_TESS_CTRL);
@ -126,7 +126,7 @@ brw_assign_tcs_urb_setup(fs_visitor &s)
}
static bool
run_tcs(fs_visitor &s)
run_tcs(brw_shader &s)
{
assert(s.stage == MESA_SHADER_TESS_CTRL);
@ -274,9 +274,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
brw_print_vue_map(stderr, &vue_prog_data->vue_map, MESA_SHADER_TESS_CTRL);
}
fs_visitor v(compiler, &params->base, &key->base,
&prog_data->base.base, nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
brw_shader v(compiler, &params->base, &key->base,
&prog_data->base.base, nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
if (!run_tcs(v)) {
params->base.error_str =
ralloc_strdup(params->base.mem_ctx, v.fail_msg);

View file

@ -13,7 +13,7 @@
#include "util/macros.h"
static void
brw_assign_tes_urb_setup(fs_visitor &s)
brw_assign_tes_urb_setup(brw_shader &s)
{
assert(s.stage == MESA_SHADER_TESS_EVAL);
@ -28,7 +28,7 @@ brw_assign_tes_urb_setup(fs_visitor &s)
}
static bool
run_tes(fs_visitor &s)
run_tes(brw_shader &s)
{
assert(s.stage == MESA_SHADER_TESS_EVAL);
@ -153,9 +153,9 @@ brw_compile_tes(const struct brw_compiler *compiler,
MESA_SHADER_TESS_EVAL);
}
fs_visitor v(compiler, &params->base, &key->base,
&prog_data->base.base, nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
brw_shader v(compiler, &params->base, &key->base,
&prog_data->base.base, nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
if (!run_tes(v)) {
params->base.error_str =
ralloc_strdup(params->base.mem_ctx, v.fail_msg);

View file

@ -11,7 +11,7 @@
#include "dev/intel_debug.h"
static void
brw_assign_vs_urb_setup(fs_visitor &s)
brw_assign_vs_urb_setup(brw_shader &s)
{
struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(s.prog_data);
@ -29,7 +29,7 @@ brw_assign_vs_urb_setup(fs_visitor &s)
}
static bool
run_vs(fs_visitor &s)
run_vs(brw_shader &s)
{
assert(s.stage == MESA_SHADER_VERTEX);
@ -147,7 +147,7 @@ brw_compile_vs(const struct brw_compiler *compiler,
prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_SIMD8;
fs_visitor v(compiler, &params->base, &key->base,
brw_shader v(compiler, &params->base, &key->base,
&prog_data->base.base, nir, dispatch_width,
params->base.stats != NULL, debug_enabled);
if (!run_vs(v)) {

View file

@ -43,7 +43,7 @@ struct brw_bind_info {
};
struct nir_to_brw_state {
fs_visitor &s;
brw_shader &s;
const nir_shader *nir;
const intel_device_info *devinfo;
void *mem_ctx;
@ -128,7 +128,7 @@ setup_imm_b(const brw_builder &bld, int8_t v)
static void
brw_from_nir_setup_outputs(nir_to_brw_state &ntb)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
if (s.stage == MESA_SHADER_TESS_CTRL ||
s.stage == MESA_SHADER_TASK ||
@ -176,7 +176,7 @@ brw_from_nir_setup_outputs(nir_to_brw_state &ntb)
}
static void
brw_from_nir_setup_uniforms(fs_visitor &s)
brw_from_nir_setup_uniforms(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
@ -203,7 +203,7 @@ brw_from_nir_setup_uniforms(fs_visitor &s)
static brw_reg
emit_work_group_id_setup(nir_to_brw_state &ntb)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
const brw_builder &bld = ntb.bld.scalar_group();
assert(gl_shader_stage_is_compute(s.stage));
@ -226,7 +226,7 @@ emit_work_group_id_setup(nir_to_brw_state &ntb)
static bool
emit_system_values_block(nir_to_brw_state &ntb, nir_block *block)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
brw_reg *reg;
nir_foreach_instr(instr, block) {
@ -367,7 +367,7 @@ emit_system_values_block(nir_to_brw_state &ntb, nir_block *block)
static void
brw_from_nir_emit_system_values(nir_to_brw_state &ntb)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
ntb.system_values = ralloc_array(ntb.mem_ctx, brw_reg, SYSTEM_VALUE_MAX);
for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
@ -631,7 +631,7 @@ optimize_frontfacing_ternary(nir_to_brw_state &ntb,
const brw_reg &result)
{
const intel_device_info *devinfo = ntb.devinfo;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
nir_intrinsic_instr *src0 = nir_src_as_intrinsic(instr->src[0].src);
if (src0 == NULL || src0->intrinsic != nir_intrinsic_load_front_face)
@ -790,7 +790,7 @@ prepare_alu_destination_and_sources(nir_to_brw_state &ntb,
instr->def.bit_size));
/* Move and vecN instructions may still be vectored. Return the raw,
* vectored source and destination so that fs_visitor::nir_emit_alu can
* vectored source and destination so that brw_shader::nir_emit_alu can
* handle it. Other callers should not have to handle these kinds of
* instructions.
*/
@ -2123,7 +2123,7 @@ emit_pixel_interpolater_send(const brw_builder &bld,
static brw_reg
fetch_polygon_reg(const brw_builder &bld, unsigned reg, unsigned subreg)
{
const fs_visitor *shader = bld.shader;
const brw_shader *shader = bld.shader;
assert(shader->stage == MESA_SHADER_FRAGMENT);
const struct intel_device_info *devinfo = shader->devinfo;
@ -2157,7 +2157,7 @@ emit_pixel_interpolater_alu_at_offset(const brw_builder &bld,
const brw_reg &offs,
glsl_interp_mode interpolation)
{
const fs_visitor *shader = bld.shader;
const brw_shader *shader = bld.shader;
assert(shader->stage == MESA_SHADER_FRAGMENT);
const intel_device_info *devinfo = shader->devinfo;
@ -2350,7 +2350,7 @@ intexp2(const brw_builder &bld, const brw_reg &x)
static void
emit_gs_end_primitive(nir_to_brw_state &ntb, const nir_src &vertex_count_nir_src)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_GEOMETRY);
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(s.prog_data);
@ -2408,7 +2408,7 @@ emit_gs_end_primitive(nir_to_brw_state &ntb, const nir_src &vertex_count_nir_src
}
brw_reg
fs_visitor::gs_urb_per_slot_dword_index(const brw_reg &vertex_count)
brw_shader::gs_urb_per_slot_dword_index(const brw_reg &vertex_count)
{
/* We use a single UD register to accumulate control data bits (32 bits
* for each of the SIMD8 channels). So we need to write a DWord (32 bits)
@ -2455,7 +2455,7 @@ fs_visitor::gs_urb_per_slot_dword_index(const brw_reg &vertex_count)
}
brw_reg
fs_visitor::gs_urb_channel_mask(const brw_reg &dword_index)
brw_shader::gs_urb_channel_mask(const brw_reg &dword_index)
{
brw_reg channel_mask;
@ -2493,7 +2493,7 @@ fs_visitor::gs_urb_channel_mask(const brw_reg &dword_index)
}
void
fs_visitor::emit_gs_control_data_bits(const brw_reg &vertex_count)
brw_shader::emit_gs_control_data_bits(const brw_reg &vertex_count)
{
assert(stage == MESA_SHADER_GEOMETRY);
assert(gs.control_data_bits_per_vertex != 0);
@ -2556,7 +2556,7 @@ static void
set_gs_stream_control_data_bits(nir_to_brw_state &ntb, const brw_reg &vertex_count,
unsigned stream_id)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
/* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
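A worked version of the indexing arithmetic in that comment, assuming 2 control-data bits per vertex (the multi-stream case; illustrative only):

/* Vertex v's two bits start at global bit 2*v, i.e. DWord (2*v)/32 at
 * bit offset (2*v)%32. Vertex 15 is the last one whose bits land in
 * DWord 0; vertex 16 starts DWord 1.
 */
static unsigned dword_index(unsigned v) { return (2 * v) / 32; }
static unsigned bit_offset(unsigned v)  { return (2 * v) % 32; }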
@ -2597,7 +2597,7 @@ static void
emit_gs_vertex(nir_to_brw_state &ntb, const nir_src &vertex_count_nir_src,
unsigned stream_id)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_GEOMETRY);
@ -2712,7 +2712,7 @@ emit_gs_input_load(nir_to_brw_state &ntb, const brw_reg &dst,
const brw_builder &bld = ntb.bld;
const struct intel_device_info *devinfo = ntb.devinfo;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(brw_type_size_bytes(dst.type) == 4);
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(s.prog_data);
@ -2885,7 +2885,7 @@ brw_from_nir_emit_vs_intrinsic(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_VERTEX);
brw_reg dest;
@ -2925,7 +2925,7 @@ static brw_reg
get_tcs_single_patch_icp_handle(nir_to_brw_state &ntb, const brw_builder &bld,
nir_intrinsic_instr *instr)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
const nir_src &vertex_src = instr->src[0];
@ -2970,7 +2970,7 @@ static brw_reg
get_tcs_multi_patch_icp_handle(nir_to_brw_state &ntb, const brw_builder &bld,
nir_intrinsic_instr *instr)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
const intel_device_info *devinfo = s.devinfo;
struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) s.key;
@ -3047,7 +3047,7 @@ emit_barrier(nir_to_brw_state &ntb)
const brw_builder &bld = ntb.bld;
const brw_builder ubld = bld.exec_all();
const brw_builder hbld = ubld.group(8 * reg_unit(devinfo), 0);
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
/* We are getting the barrier ID from the compute shader header */
assert(gl_shader_stage_uses_workgroup(s.stage));
@ -3079,7 +3079,7 @@ emit_tcs_barrier(nir_to_brw_state &ntb)
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_TESS_CTRL);
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
@ -3123,7 +3123,7 @@ brw_from_nir_emit_tcs_intrinsic(nir_to_brw_state &ntb,
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_TESS_CTRL);
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(s.prog_data);
@ -3346,7 +3346,7 @@ brw_from_nir_emit_tes_intrinsic(nir_to_brw_state &ntb,
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_TESS_EVAL);
struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(s.prog_data);
@ -3453,7 +3453,7 @@ brw_from_nir_emit_gs_intrinsic(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_GEOMETRY);
@ -3516,7 +3516,7 @@ brw_from_nir_emit_gs_intrinsic(nir_to_brw_state &ntb,
static brw_reg
fetch_render_target_array_index(const brw_builder &bld)
{
const fs_visitor *v = bld.shader;
const brw_shader *v = bld.shader;
if (bld.shader->devinfo->ver >= 20) {
/* Gfx20+ has separate Render Target Array indices for each pair
@ -3574,7 +3574,7 @@ fetch_render_target_array_index(const brw_builder &bld)
static brw_reg
fetch_viewport_index(const brw_builder &bld)
{
const fs_visitor *v = bld.shader;
const brw_shader *v = bld.shader;
if (bld.shader->devinfo->ver >= 20) {
/* Gfx20+ has separate viewport indices for each pair
@ -3675,7 +3675,7 @@ static brw_inst *
emit_non_coherent_fb_read(nir_to_brw_state &ntb, const brw_builder &bld, const brw_reg &dst,
unsigned target)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
const struct intel_device_info *devinfo = s.devinfo;
assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
@ -3776,7 +3776,7 @@ alloc_temporary(const brw_builder &bld, unsigned size, brw_reg *regs, unsigned n
static brw_reg
alloc_frag_output(nir_to_brw_state &ntb, unsigned location)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_FRAGMENT);
const brw_wm_prog_key *const key =
@ -3843,7 +3843,7 @@ emit_frontfacing_interpolation(nir_to_brw_state &ntb)
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
brw_reg ff = bld.vgrf(BRW_TYPE_D);
@ -3913,7 +3913,7 @@ static brw_reg
emit_samplepos_setup(nir_to_brw_state &ntb)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_FRAGMENT);
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
@ -3974,7 +3974,7 @@ emit_sampleid_setup(nir_to_brw_state &ntb)
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_FRAGMENT);
ASSERTED brw_wm_prog_key *key = (brw_wm_prog_key*) s.key;
@ -4044,7 +4044,7 @@ static brw_reg
emit_samplemaskin_setup(nir_to_brw_state &ntb)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_FRAGMENT);
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
@ -4145,7 +4145,7 @@ static brw_reg
brw_interp_reg(const brw_builder &bld, unsigned location,
unsigned channel, unsigned comp)
{
fs_visitor &s = *bld.shader;
brw_shader &s = *bld.shader;
assert(s.stage == MESA_SHADER_FRAGMENT);
assert(BITFIELD64_BIT(location) & ~s.nir->info.per_primitive_inputs);
@ -4184,7 +4184,7 @@ brw_interp_reg(const brw_builder &bld, unsigned location,
static brw_reg
brw_per_primitive_reg(const brw_builder &bld, int location, unsigned comp)
{
fs_visitor &s = *bld.shader;
brw_shader &s = *bld.shader;
assert(s.stage == MESA_SHADER_FRAGMENT);
assert(BITFIELD64_BIT(location) & s.nir->info.per_primitive_inputs);
@ -4219,7 +4219,7 @@ brw_from_nir_emit_fs_intrinsic(nir_to_brw_state &ntb,
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_FRAGMENT);
@ -4589,7 +4589,7 @@ brw_from_nir_emit_fs_intrinsic(nir_to_brw_state &ntb,
}
static unsigned
brw_workgroup_size(fs_visitor &s)
brw_workgroup_size(brw_shader &s)
{
assert(gl_shader_stage_uses_workgroup(s.stage));
assert(!s.nir->info.workgroup_size_variable);
@ -4603,7 +4603,7 @@ brw_from_nir_emit_cs_intrinsic(nir_to_brw_state &ntb,
{
const intel_device_info *devinfo = ntb.devinfo;
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(gl_shader_stage_uses_workgroup(s.stage));
struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(s.prog_data);
@ -4770,7 +4770,7 @@ brw_from_nir_emit_bs_intrinsic(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(brw_shader_stage_is_bindless(s.stage));
const brw_bs_thread_payload &payload = s.bs_payload();
@ -4883,7 +4883,7 @@ swizzle_nir_scratch_addr(nir_to_brw_state &ntb,
const nir_src &nir_addr_src,
bool in_dwords)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
const brw_reg chan_index = bld.LOAD_SUBGROUP_INVOCATION();
const unsigned chan_index_bits = ffs(s.dispatch_width) - 1;
@ -5023,7 +5023,7 @@ lsc_fence_descriptor_for_intrinsic(const struct intel_device_info *devinfo,
static brw_reg
get_timestamp(const brw_builder &bld)
{
fs_visitor &s = *bld.shader;
brw_shader &s = *bld.shader;
brw_reg ts = brw_reg(retype(brw_vec4_reg(ARF,
BRW_ARF_TIMESTAMP, 0), BRW_TYPE_UD));
@ -5618,7 +5618,7 @@ static void
brw_from_nir_emit_task_mesh_intrinsic(nir_to_brw_state &ntb, const brw_builder &bld,
nir_intrinsic_instr *instr)
{
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_MESH || s.stage == MESA_SHADER_TASK);
const brw_task_mesh_thread_payload &payload = s.task_mesh_payload();
@ -5665,7 +5665,7 @@ brw_from_nir_emit_task_intrinsic(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_TASK);
const brw_task_mesh_thread_payload &payload = s.task_mesh_payload();
@ -5692,7 +5692,7 @@ brw_from_nir_emit_mesh_intrinsic(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
assert(s.stage == MESA_SHADER_MESH);
const brw_task_mesh_thread_payload &payload = s.task_mesh_payload();
@ -5725,7 +5725,7 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
const brw_builder &bld, nir_intrinsic_instr *instr)
{
const intel_device_info *devinfo = ntb.devinfo;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
/* We handle this as a special case */
if (instr->intrinsic == nir_intrinsic_decl_reg) {
@ -6874,7 +6874,7 @@ brw_from_nir_emit_memory_access(nir_to_brw_state &ntb,
nir_intrinsic_instr *instr)
{
const intel_device_info *devinfo = ntb.devinfo;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
brw_reg srcs[MEMORY_LOGICAL_NUM_SRCS];
@ -7765,7 +7765,7 @@ static void
emit_shader_float_controls_execution_mode(nir_to_brw_state &ntb)
{
const brw_builder &bld = ntb.bld;
fs_visitor &s = ntb.s;
brw_shader &s = ntb.s;
unsigned execution_mode = s.nir->info.float_controls_execution_mode;
if (execution_mode == FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE)
@ -7791,7 +7791,7 @@ emit_shader_float_controls_execution_mode(nir_to_brw_state &ntb)
static UNUSED void
brw_fs_test_dispatch_packing(const brw_builder &bld)
{
const fs_visitor *shader = bld.shader;
const brw_shader *shader = bld.shader;
const gl_shader_stage stage = shader->stage;
const bool uses_vmask =
stage == MESA_SHADER_FRAGMENT &&
@ -7817,7 +7817,7 @@ brw_fs_test_dispatch_packing(const brw_builder &bld)
}
void
brw_from_nir(fs_visitor *s)
brw_from_nir(brw_shader *s)
{
nir_to_brw_state ntb = {
.s = *s,

View file

@ -1251,7 +1251,7 @@ is_multi_copy_payload(const struct intel_device_info *devinfo,
* instruction.
*/
bool
is_coalescing_payload(const fs_visitor &s, const brw_inst *inst)
is_coalescing_payload(const brw_shader &s, const brw_inst *inst)
{
return is_identity_payload(s.devinfo, VGRF, inst) &&
inst->src[0].offset == 0 &&

View file

@ -373,7 +373,7 @@ bool is_identity_payload(const struct intel_device_info *devinfo,
bool is_multi_copy_payload(const struct intel_device_info *devinfo,
const brw_inst *inst);
bool is_coalescing_payload(const struct fs_visitor &s, const brw_inst *inst);
bool is_coalescing_payload(const struct brw_shader &s, const brw_inst *inst);
bool has_bank_conflict(const struct brw_isa_info *isa, const brw_inst *inst);

View file

@ -23,7 +23,7 @@
* clear is_scalar "just in case."
*/
bool
brw_lower_scalar_fp64_MAD(fs_visitor &s)
brw_lower_scalar_fp64_MAD(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -48,7 +48,7 @@ brw_lower_scalar_fp64_MAD(fs_visitor &s)
}
bool
brw_lower_load_payload(fs_visitor &s)
brw_lower_load_payload(brw_shader &s)
{
bool progress = false;
@ -104,7 +104,7 @@ brw_lower_load_payload(fs_visitor &s)
* Or, for unsigned ==/!= comparisons, simply change the types.
*/
bool
brw_lower_csel(fs_visitor &s)
brw_lower_csel(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -180,7 +180,7 @@ brw_lower_csel(fs_visitor &s)
}
bool
brw_lower_sub_sat(fs_visitor &s)
brw_lower_sub_sat(brw_shader &s)
{
bool progress = false;
@ -281,7 +281,7 @@ brw_lower_sub_sat(fs_visitor &s)
* component layout.
*/
bool
brw_lower_barycentrics(fs_visitor &s)
brw_lower_barycentrics(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
@ -352,7 +352,7 @@ brw_lower_barycentrics(fs_visitor &s)
* swizzles of the source, specified as \p swz0 and \p swz1.
*/
static bool
lower_derivative(fs_visitor &s, bblock_t *block, brw_inst *inst,
lower_derivative(brw_shader &s, bblock_t *block, brw_inst *inst,
unsigned swz0, unsigned swz1)
{
const brw_builder ubld = brw_builder(&s, block, inst).exec_all();
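In scalar terms, the pair of swizzles computes a difference between neighbouring channels of a 2x2 quad. A simplified model of the coarse case, with quad channels arranged 0 1 over 2 3:

/* Coarse dFdx/dFdy for channel c of a quad v[4] laid out
 *    v[0] v[1]
 *    v[2] v[3]
 */
static float dfdx(const float v[4], unsigned c) { return v[c | 1] - v[c & ~1u]; }
static float dfdy(const float v[4], unsigned c) { return v[c | 2] - v[c & ~2u]; }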
@ -375,7 +375,7 @@ lower_derivative(fs_visitor &s, bblock_t *block, brw_inst *inst,
* them efficiently (i.e. XeHP).
*/
bool
brw_lower_derivatives(fs_visitor &s)
brw_lower_derivatives(brw_shader &s)
{
bool progress = false;
@ -408,7 +408,7 @@ brw_lower_derivatives(fs_visitor &s)
}
bool
brw_lower_find_live_channel(fs_visitor &s)
brw_lower_find_live_channel(brw_shader &s)
{
bool progress = false;
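Conceptually, FIND_LIVE_CHANNEL maps to a count-trailing-zeros of the execution mask; a scalar model (illustrative, ignoring the per-generation mask sources the pass actually handles):

#include <cstdint>

static unsigned
find_live_channel(uint32_t exec_mask)
{
   /* The first live channel is the lowest set bit of the mask. */
   return exec_mask ? (unsigned)__builtin_ctz(exec_mask) : 0;
}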
@ -514,7 +514,7 @@ brw_lower_find_live_channel(fs_visitor &s)
* just adds a new vgrf for the second payload and copies it over.
*/
bool
brw_lower_sends_overlapping_payload(fs_visitor &s)
brw_lower_sends_overlapping_payload(brw_shader &s)
{
bool progress = false;
@ -560,7 +560,7 @@ brw_lower_sends_overlapping_payload(fs_visitor &s)
* ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
*/
bool
brw_lower_3src_null_dest(fs_visitor &s)
brw_lower_3src_null_dest(brw_shader &s)
{
bool progress = false;
@ -595,7 +595,7 @@ unsupported_64bit_type(const intel_device_info *devinfo,
* - Splitting 64-bit MOV/SEL into 2x32-bit where needed
*/
bool
brw_lower_alu_restrictions(fs_visitor &s)
brw_lower_alu_restrictions(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -727,7 +727,7 @@ brw_lower_vgrf_to_fixed_grf(const struct intel_device_info *devinfo, brw_inst *i
}
void
brw_lower_vgrfs_to_fixed_grfs(fs_visitor &s)
brw_lower_vgrfs_to_fixed_grfs(brw_shader &s)
{
assert(s.grf_used || !"Must be called after register allocation");
@ -776,7 +776,7 @@ brw_s0(enum brw_reg_type type, unsigned subnr)
}
static bool
brw_lower_send_gather_inst(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_send_gather_inst(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const intel_device_info *devinfo = s.devinfo;
assert(devinfo->ver >= 30);
@ -826,7 +826,7 @@ brw_lower_send_gather_inst(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
bool
brw_lower_send_gather(fs_visitor &s)
brw_lower_send_gather(brw_shader &s)
{
assert(s.devinfo->ver >= 30);
assert(s.grf_used || !"Must be called after register allocation");
@ -846,7 +846,7 @@ brw_lower_send_gather(fs_visitor &s)
}
bool
brw_lower_load_subgroup_invocation(fs_visitor &s)
brw_lower_load_subgroup_invocation(brw_shader &s)
{
bool progress = false;
@ -886,7 +886,7 @@ brw_lower_load_subgroup_invocation(fs_visitor &s)
}
bool
brw_lower_indirect_mov(fs_visitor &s)
brw_lower_indirect_mov(brw_shader &s)
{
bool progress = false;

View file

@ -269,7 +269,7 @@ int8_using_mul_add(const brw_builder &bld, brw_inst *inst)
}
bool
brw_lower_dpas(fs_visitor &v)
brw_lower_dpas(brw_shader &v)
{
bool progress = false;

View file

@ -137,7 +137,7 @@ factor_uint32(uint32_t x, unsigned *result_a, unsigned *result_b)
}
static void
brw_lower_mul_dword_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
brw_lower_mul_dword_inst(brw_shader &s, brw_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const brw_builder ibld(&s, block, inst);
@ -298,7 +298,7 @@ brw_lower_mul_dword_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
}
static void
brw_lower_mul_qword_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
brw_lower_mul_qword_inst(brw_shader &s, brw_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const brw_builder ibld(&s, block, inst);
@ -367,7 +367,7 @@ brw_lower_mul_qword_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
}
static void
brw_lower_mulh_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
brw_lower_mulh_inst(brw_shader &s, brw_inst *inst, bblock_t *block)
{
const intel_device_info *devinfo = s.devinfo;
const brw_builder ibld(&s, block, inst);
@ -411,7 +411,7 @@ brw_lower_mulh_inst(fs_visitor &s, brw_inst *inst, bblock_t *block)
}
bool
brw_lower_integer_multiplication(fs_visitor &s)
brw_lower_integer_multiplication(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
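The arithmetic identity behind the DWord-multiply lowering, as a scalar model (illustrative; the pass itself emits mul/mach pairs or 16-bit partial products depending on the generation):

#include <cstdint>

/* 32x32 -> low 32 bits via 16-bit halves of one operand:
 * a * b = a_lo * b + ((a_hi * b) << 16)   (mod 2^32)
 */
static uint32_t
mul32_lowered(uint32_t a, uint32_t b)
{
   const uint32_t lo = (a & 0xffff) * b;
   const uint32_t hi = (a >> 16) * b;
   return lo + (hi << 16);
}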

View file

@ -1303,7 +1303,7 @@ emit_predicate_on_vector_mask(const brw_builder &bld, brw_inst *inst)
const brw_builder ubld = bld.exec_all().group(1, 0);
const fs_visitor &s = *bld.shader;
const brw_shader &s = *bld.shader;
const brw_reg vector_mask = ubld.vgrf(BRW_TYPE_UW);
ubld.UNDEF(vector_mask);
ubld.emit(SHADER_OPCODE_READ_ARCH_REG, vector_mask, retype(brw_sr0_reg(3),
@ -2308,7 +2308,7 @@ lower_trace_ray_logical_send(const brw_builder &bld, brw_inst *inst)
* optimization. This occurs in many Vulkan CTS tests.
*
* Many places in the late compiler, including but not limited to an
* assertion in fs_visitor::assign_curb_setup, assume that all uses of a
* assertion in brw_shader::assign_curb_setup, assume that all uses of a
* UNIFORM will be uniform (i.e., <0,1,0>). The clever SIMD2
* optimization violates that assumption.
*/
@ -2512,7 +2512,7 @@ lower_hdc_memory_fence_and_interlock(const brw_builder &bld, brw_inst *inst)
}
bool
brw_lower_logical_sends(fs_visitor &s)
brw_lower_logical_sends(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -2648,7 +2648,7 @@ brw_lower_logical_sends(fs_visitor &s)
* source operand for all 8 or 16 of its channels.
*/
bool
brw_lower_uniform_pull_constant_loads(fs_visitor &s)
brw_lower_uniform_pull_constant_loads(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -2740,7 +2740,7 @@ brw_lower_uniform_pull_constant_loads(fs_visitor &s)
}
bool
brw_lower_send_descriptors(fs_visitor &s)
brw_lower_send_descriptors(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;

View file

@ -27,7 +27,7 @@
#include "brw_builder.h"
bool
brw_lower_pack(fs_visitor &s)
brw_lower_pack(brw_shader &s)
{
bool progress = false;

View file

@ -456,7 +456,7 @@ namespace {
}
bool
lower_instruction(fs_visitor *v, bblock_t *block, brw_inst *inst);
lower_instruction(brw_shader *v, bblock_t *block, brw_inst *inst);
}
/**
@ -466,7 +466,7 @@ namespace {
* MOV instruction prior to the original instruction.
*/
bool
brw_lower_src_modifiers(fs_visitor &s, bblock_t *block, brw_inst *inst, unsigned i)
brw_lower_src_modifiers(brw_shader &s, bblock_t *block, brw_inst *inst, unsigned i)
{
assert(inst->components_read(i) == 1);
assert(s.devinfo->has_integer_dword_mul ||
@ -493,7 +493,7 @@ namespace {
* instruction.
*/
bool
lower_dst_modifiers(fs_visitor *v, bblock_t *block, brw_inst *inst)
lower_dst_modifiers(brw_shader *v, bblock_t *block, brw_inst *inst)
{
const brw_builder ibld(v, block, inst);
const brw_reg_type type = get_exec_type(inst);
@ -542,7 +542,7 @@ namespace {
* copies into a temporary with the same channel layout as the destination.
*/
bool
lower_src_region(fs_visitor *v, bblock_t *block, brw_inst *inst, unsigned i)
lower_src_region(brw_shader *v, bblock_t *block, brw_inst *inst, unsigned i)
{
assert(inst->components_read(i) == 1);
const intel_device_info *devinfo = v->devinfo;
@ -604,7 +604,7 @@ namespace {
* sources.
*/
bool
lower_dst_region(fs_visitor *v, bblock_t *block, brw_inst *inst)
lower_dst_region(brw_shader *v, bblock_t *block, brw_inst *inst)
{
/* We cannot replace the result of an integer multiply which writes the
* accumulator because MUL+MACH pairs act on the accumulator as a 66-bit
@ -679,7 +679,7 @@ namespace {
* where the execution type of an instruction is unsupported.
*/
bool
lower_exec_type(fs_visitor *v, bblock_t *block, brw_inst *inst)
lower_exec_type(brw_shader *v, bblock_t *block, brw_inst *inst)
{
assert(inst->dst.type == get_exec_type(inst));
const unsigned mask = has_invalid_exec_type(v->devinfo, inst);
@ -729,7 +729,7 @@ namespace {
* the general lowering in lower_src_modifiers or lower_src_region.
*/
void
lower_src_conversion(fs_visitor *v, bblock_t *block, brw_inst *inst)
lower_src_conversion(brw_shader *v, bblock_t *block, brw_inst *inst)
{
const intel_device_info *devinfo = v->devinfo;
const brw_builder ibld = brw_builder(v, block, inst).scalar_group();
@ -758,7 +758,7 @@ namespace {
* instruction.
*/
bool
lower_instruction(fs_visitor *v, bblock_t *block, brw_inst *inst)
lower_instruction(brw_shader *v, bblock_t *block, brw_inst *inst)
{
const intel_device_info *devinfo = v->devinfo;
bool progress = false;
@ -799,7 +799,7 @@ namespace {
}
bool
brw_lower_regioning(fs_visitor &s)
brw_lower_regioning(brw_shader &s)
{
bool progress = false;

View file

@ -260,7 +260,7 @@ namespace {
* Return the number of instructions in the program.
*/
unsigned
num_instructions(const fs_visitor *shader)
num_instructions(const brw_shader *shader)
{
return shader->cfg->blocks[shader->cfg->num_blocks - 1]->end_ip + 1;
}
@ -270,7 +270,7 @@ namespace {
* instruction of the shader for subsequent constant-time look-up.
*/
ordered_address *
ordered_inst_addresses(const fs_visitor *shader)
ordered_inst_addresses(const brw_shader *shader)
{
ordered_address *jps = new ordered_address[num_instructions(shader)];
ordered_address jp(TGL_PIPE_ALL, 0);
@ -1040,7 +1040,7 @@ namespace {
* instruction \p inst.
*/
void
update_inst_scoreboard(const fs_visitor *shader, const ordered_address *jps,
update_inst_scoreboard(const brw_shader *shader, const ordered_address *jps,
const brw_inst *inst, unsigned ip, scoreboard &sb)
{
const bool exec_all = inst->force_writemask_all;
@ -1098,7 +1098,7 @@ namespace {
* program.
*/
scoreboard *
gather_block_scoreboards(const fs_visitor *shader,
gather_block_scoreboards(const brw_shader *shader,
const ordered_address *jps)
{
scoreboard *sbs = new scoreboard[shader->cfg->num_blocks];
@ -1118,7 +1118,7 @@ namespace {
* of each block, and returns it as an array of scoreboard objects.
*/
scoreboard *
propagate_block_scoreboards(const fs_visitor *shader,
propagate_block_scoreboards(const brw_shader *shader,
const ordered_address *jps,
equivalence_relation &eq)
{
@ -1165,7 +1165,7 @@ namespace {
* shader based on the result of global dependency analysis.
*/
dependency_list *
gather_inst_dependencies(const fs_visitor *shader,
gather_inst_dependencies(const brw_shader *shader,
const ordered_address *jps)
{
const struct intel_device_info *devinfo = shader->devinfo;
@ -1242,7 +1242,7 @@ namespace {
* instruction of the shader.
*/
dependency_list *
allocate_inst_dependencies(const fs_visitor *shader,
allocate_inst_dependencies(const brw_shader *shader,
const dependency_list *deps0)
{
/* XXX - Use bin-packing algorithm to assign hardware SBIDs optimally in
@ -1286,7 +1286,7 @@ namespace {
* represented directly by annotating existing instructions.
*/
void
emit_inst_dependencies(fs_visitor *shader,
emit_inst_dependencies(brw_shader *shader,
const ordered_address *jps,
const dependency_list *deps)
{
@ -1360,7 +1360,7 @@ namespace {
}
bool
brw_lower_scoreboard(fs_visitor &s)
brw_lower_scoreboard(brw_shader &s)
{
if (s.devinfo->ver >= 12) {
const ordered_address *jps = ordered_inst_addresses(&s);

View file

@ -49,7 +49,7 @@ is_mixed_float_with_packed_fp16_dst(const brw_inst *inst)
* excessively restrictive.
*/
static unsigned
get_fpu_lowered_simd_width(const fs_visitor *shader,
get_fpu_lowered_simd_width(const brw_shader *shader,
const brw_inst *inst)
{
const struct brw_compiler *compiler = shader->compiler;
@ -236,11 +236,11 @@ is_half_float_src_dst(const brw_inst *inst)
/**
* Get the closest native SIMD width supported by the hardware for instruction
* \p inst. The instruction will be left untouched by
* fs_visitor::lower_simd_width() if the returned value is equal to the
* brw_shader::lower_simd_width() if the returned value is equal to the
* original execution size.
*/
unsigned
brw_get_lowered_simd_width(const fs_visitor *shader, const brw_inst *inst)
brw_get_lowered_simd_width(const brw_shader *shader, const brw_inst *inst)
{
const struct brw_compiler *compiler = shader->compiler;
const struct intel_device_info *devinfo = compiler->devinfo;
@ -644,7 +644,7 @@ emit_zip(const brw_builder &lbld_before, const brw_builder &lbld_after,
}
bool
brw_lower_simd_width(fs_visitor &s)
brw_lower_simd_width(brw_shader &s)
{
bool progress = false;

View file

@ -249,7 +249,7 @@ brw_emit_scan(const brw_builder &bld, enum opcode opcode, const brw_reg &tmp,
}
static bool
brw_lower_reduce(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_reduce(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -301,7 +301,7 @@ brw_lower_reduce(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
static bool
brw_lower_scan(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_scan(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -486,7 +486,7 @@ brw_lower_quad_vote_gfx20(const brw_builder &bld, enum opcode opcode, brw_reg ds
}
static bool
brw_lower_vote(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_vote(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -516,7 +516,7 @@ brw_lower_vote(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
static bool
brw_lower_ballot(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_ballot(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -546,7 +546,7 @@ brw_lower_ballot(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
static bool
brw_lower_quad_swap(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_quad_swap(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -602,7 +602,7 @@ brw_lower_quad_swap(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
static bool
brw_lower_read_from_live_channel(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_read_from_live_channel(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -618,7 +618,7 @@ brw_lower_read_from_live_channel(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
static bool
brw_lower_read_from_channel(fs_visitor &s, bblock_t *block, brw_inst *inst)
brw_lower_read_from_channel(brw_shader &s, bblock_t *block, brw_inst *inst)
{
const brw_builder bld(&s, block, inst);
@ -651,7 +651,7 @@ brw_lower_read_from_channel(fs_visitor &s, bblock_t *block, brw_inst *inst)
}
bool
brw_lower_subgroup_ops(fs_visitor &s)
brw_lower_subgroup_ops(brw_shader &s)
{
bool progress = false;
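A scalar model of the reduction being lowered: log2(width) combine steps, each halving the number of live partial sums (the real pass uses strided register regions rather than an array):

#include <cstdint>

static uint32_t
reduce_add(uint32_t lane[], unsigned width)   /* width = dispatch width */
{
   for (unsigned stride = 1; stride < width; stride *= 2) {
      for (unsigned i = 0; i + stride < width; i += 2 * stride)
         lane[i] += lane[i + stride];
   }
   return lane[0];
}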

View file

@ -112,7 +112,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
assert(block->cf_node.parent == &impl->cf_node);
assert(nir_cf_node_is_last(&block->cf_node));
/* See store_output in fs_visitor::nir_emit_fs_intrinsic */
/* See store_output in brw_shader::nir_emit_fs_intrinsic */
const unsigned store_offset = nir_src_as_uint(intrin->src[1]);
const unsigned driver_location = nir_intrinsic_base(intrin) +
SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);

View file

@ -10,7 +10,7 @@
#include "dev/intel_debug.h"
void
brw_optimize(fs_visitor &s)
brw_optimize(brw_shader &s)
{
const nir_shader *nir = s.nir;
@ -230,7 +230,7 @@ load_payload_sources_read_for_size(brw_inst *lp, unsigned size_read)
*/
bool
brw_opt_zero_samples(fs_visitor &s)
brw_opt_zero_samples(brw_shader &s)
{
bool progress = false;
@ -307,7 +307,7 @@ brw_opt_zero_samples(fs_visitor &s)
* payload concatenation altogether.
*/
bool
brw_opt_split_sends(fs_visitor &s)
brw_opt_split_sends(brw_shader &s)
{
bool progress = false;
@ -388,7 +388,7 @@ brw_opt_split_sends(fs_visitor &s)
* halt-target
*/
bool
brw_opt_remove_redundant_halts(fs_visitor &s)
brw_opt_remove_redundant_halts(brw_shader &s)
{
bool progress = false;
@ -437,7 +437,7 @@ brw_opt_remove_redundant_halts(fs_visitor &s)
* analysis.
*/
bool
brw_opt_eliminate_find_live_channel(fs_visitor &s)
brw_opt_eliminate_find_live_channel(brw_shader &s)
{
bool progress = false;
unsigned depth = 0;
@ -531,7 +531,7 @@ out:
* mode once is enough for the full vector/matrix
*/
bool
brw_opt_remove_extra_rounding_modes(fs_visitor &s)
brw_opt_remove_extra_rounding_modes(brw_shader &s)
{
bool progress = false;
unsigned execution_mode = s.nir->info.float_controls_execution_mode;
@ -572,7 +572,7 @@ brw_opt_remove_extra_rounding_modes(fs_visitor &s)
}
bool
brw_opt_send_to_send_gather(fs_visitor &s)
brw_opt_send_to_send_gather(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;
@ -653,7 +653,7 @@ brw_opt_send_to_send_gather(fs_visitor &s)
* having to write the ARF scalar register.
*/
bool
brw_opt_send_gather_to_send(fs_visitor &s)
brw_opt_send_gather_to_send(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;


@ -21,7 +21,7 @@
*/
static bool
opt_address_reg_load_local(fs_visitor &s, bblock_t *block, const brw_def_analysis &defs)
opt_address_reg_load_local(brw_shader &s, bblock_t *block, const brw_def_analysis &defs)
{
bool progress = false;
@ -53,7 +53,7 @@ opt_address_reg_load_local(fs_visitor &s, bblock_t *block, const brw_def_analysi
}
bool
brw_opt_address_reg_load(fs_visitor &s)
brw_opt_address_reg_load(brw_shader &s)
{
bool progress = false;
const brw_def_analysis &defs = s.def_analysis.require();


@ -318,7 +318,7 @@ brw_opt_constant_fold_instruction(const intel_device_info *devinfo, brw_inst *in
}
bool
brw_opt_algebraic(fs_visitor &s)
brw_opt_algebraic(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
bool progress = false;


@ -507,7 +507,7 @@ namespace {
* the program.
*/
partitioning
shader_reg_partitioning(const fs_visitor *v)
shader_reg_partitioning(const brw_shader *v)
{
partitioning p(BRW_MAX_GRF);
@ -530,7 +530,7 @@ namespace {
* original location to avoid violating hardware or software assumptions.
*/
bool *
shader_reg_constraints(const fs_visitor *v, const partitioning &p)
shader_reg_constraints(const brw_shader *v, const partitioning &p)
{
bool *constrained = new bool[p.num_atoms()]();
@ -607,7 +607,7 @@ namespace {
* helpful than not optimizing at all.
*/
weight_vector_type *
shader_conflict_weight_matrix(const fs_visitor *v, const partitioning &p)
shader_conflict_weight_matrix(const brw_shader *v, const partitioning &p)
{
weight_vector_type *conflicts = new weight_vector_type[p.num_atoms()];
for (unsigned r = 0; r < p.num_atoms(); r++)
@ -887,7 +887,7 @@ namespace {
}
bool
brw_opt_bank_conflicts(fs_visitor &s)
brw_opt_bank_conflicts(brw_shader &s)
{
assert(s.grf_used || !"Must be called after register allocation");


@ -248,7 +248,7 @@ opt_cmod_propagation_local(const intel_device_info *devinfo, bblock_t *block)
continue;
/* Only an AND.NZ can be propagated. Many AND.Z instructions are
* generated (for ir_unop_not in fs_visitor::emit_bool_to_cond_code).
* generated (for ir_unop_not in brw_shader::emit_bool_to_cond_code).
* Propagating those would require inverting the condition on the CMP.
* This changes both the flag value and the register destination of the
* CMP. That result may be used elsewhere, so we can't change its value
@ -556,7 +556,7 @@ opt_cmod_propagation_local(const intel_device_info *devinfo, bblock_t *block)
}
bool
brw_opt_cmod_propagation(fs_visitor &s)
brw_opt_cmod_propagation(brw_shader &s)
{
bool progress = false;
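
The comment in this file explains why only AND.NZ is propagated: folding a .z flag write back into the producing instruction would mean inverting that instruction's condition, which also changes the value it writes to its destination, and that value may have other users. A toy version of just that legality decision (hypothetical types, not the real pass):

#include <cassert>
#include <cstdio>

enum toy_cmod { CMOD_NONE, CMOD_Z, CMOD_NZ };

struct toy_inst {
   toy_cmod mod;   /* conditional mod: also writes the flag register */
};

/* Folding a .nz flag write back onto the producer leaves the producer's
 * register result untouched, so it is safe.  Folding .z would require
 * inverting the producer's condition, which also changes the value it
 * writes -- a value that may have other users -- so the pass bails.
 */
static bool
try_propagate_cmod(toy_inst &producer, const toy_inst &flag_write)
{
   if (flag_write.mod != CMOD_NZ)
      return false;

   producer.mod = CMOD_NZ;
   return true;
}

int main()
{
   toy_inst and_inst = {CMOD_NONE};
   toy_inst mov_nz = {CMOD_NZ};
   toy_inst mov_z = {CMOD_Z};

   assert(try_propagate_cmod(and_inst, mov_nz));    /* folded */
   assert(!try_propagate_cmod(and_inst, mov_z));    /* rejected */
   printf("ok\n");
   return 0;
}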


@ -1183,7 +1183,7 @@ struct register_allocation {
};
static brw_reg
allocate_slots(fs_visitor &s,
allocate_slots(brw_shader &s,
struct register_allocation *regs, unsigned num_regs,
unsigned bytes, unsigned align_bytes)
{
@ -1242,7 +1242,7 @@ deallocate_slots(const struct intel_device_info *devinfo,
}
static void
parcel_out_registers(fs_visitor &s,
parcel_out_registers(brw_shader &s,
struct imm *imm, unsigned len, const bblock_t *cur_block,
struct register_allocation *regs, unsigned num_regs)
{
@ -1284,7 +1284,7 @@ parcel_out_registers(fs_visitor &s,
}
bool
brw_opt_combine_constants(fs_visitor &s)
brw_opt_combine_constants(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
void *const_ctx = ralloc_context(NULL);
@ -1772,7 +1772,7 @@ brw_opt_combine_constants(fs_visitor &s)
if (rebuild_cfg) {
/* When the CFG is initially built, the instructions are removed from
* the list of instructions stored in fs_visitor -- the same exec_node
* the list of instructions stored in brw_shader -- the same exec_node
* is used for membership in that list and in a block list. So we need
* to pull them back before rebuilding the CFG.
*/
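
The comment above captures an invariant of the IR's intrusive lists: an exec_node embeds its own link pointers, so an instruction can sit on exactly one list at a time, and moving it into a block list takes it off the shader-level list. A minimal standalone sketch of that invariant (hypothetical node/list types, not Mesa's exec_list API):

#include <cassert>
#include <cstdio>

struct toy_node {
   toy_node *prev = nullptr, *next = nullptr;

   bool linked() const { return prev != nullptr; }

   void remove()
   {
      prev->next = next;
      next->prev = prev;
      prev = next = nullptr;
   }
};

struct toy_list {
   toy_node head;

   toy_list() { head.prev = head.next = &head; }

   void push_back(toy_node *n)
   {
      assert(!n->linked());   /* one node, one list -- never two */
      n->prev = head.prev;
      n->next = &head;
      head.prev->next = n;
      head.prev = n;
   }

   bool empty() const { return head.next == &head; }
};

int main()
{
   toy_node inst;
   toy_list shader_insts, block_insts;

   shader_insts.push_back(&inst);

   /* Building the CFG moves the instruction into a per-block list. */
   inst.remove();
   block_insts.push_back(&inst);

   /* "Pulling it back" before rebuilding the CFG. */
   inst.remove();
   shader_insts.push_back(&inst);

   printf("block list empty again: %d\n", block_insts.empty());
   return 0;
}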


@ -657,7 +657,7 @@ instruction_requires_packed_data(brw_inst *inst)
}
static bool
try_copy_propagate(fs_visitor &s, brw_inst *inst,
try_copy_propagate(brw_shader &s, brw_inst *inst,
acp_entry *entry, int arg,
uint8_t max_polygons)
{
@ -1371,7 +1371,7 @@ commute_immediates(brw_inst *inst)
* list.
*/
static bool
opt_copy_propagation_local(fs_visitor &s, linear_ctx *lin_ctx,
opt_copy_propagation_local(brw_shader &s, linear_ctx *lin_ctx,
bblock_t *block, struct acp &acp,
uint8_t max_polygons)
{
@ -1477,7 +1477,7 @@ opt_copy_propagation_local(fs_visitor &s, linear_ctx *lin_ctx,
}
bool
brw_opt_copy_propagation(fs_visitor &s)
brw_opt_copy_propagation(brw_shader &s)
{
bool progress = false;
void *copy_prop_ctx = ralloc_context(NULL);
@ -1546,7 +1546,7 @@ brw_opt_copy_propagation(fs_visitor &s)
}
static bool
try_copy_propagate_def(fs_visitor &s,
try_copy_propagate_def(brw_shader &s,
brw_inst *def, const brw_reg &val,
brw_inst *inst, int arg,
uint8_t max_polygons)
@ -1849,7 +1849,7 @@ find_value_for_offset(brw_inst *def, const brw_reg &src, unsigned src_size)
}
bool
brw_opt_copy_propagation_defs(fs_visitor &s)
brw_opt_copy_propagation_defs(brw_shader &s)
{
const brw_def_analysis &defs = s.def_analysis.require();
unsigned *uses_deleted = new unsigned[defs.count()]();


@ -43,7 +43,7 @@ struct remap_entry {
};
static bool
is_expression(const fs_visitor *v, const brw_inst *const inst)
is_expression(const brw_shader *v, const brw_inst *const inst)
{
switch (inst->opcode) {
case BRW_OPCODE_MOV:
@ -359,7 +359,7 @@ cmp_func(const void *data1, const void *data2)
}
static bool
remap_sources(fs_visitor &s, const brw_def_analysis &defs,
remap_sources(brw_shader &s, const brw_def_analysis &defs,
brw_inst *inst, struct remap_entry *remap_table)
{
bool progress = false;
@ -392,7 +392,7 @@ remap_sources(fs_visitor &s, const brw_def_analysis &defs,
}
bool
brw_opt_cse_defs(fs_visitor &s)
brw_opt_cse_defs(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
const brw_idom_tree &idom = s.idom_analysis.require();


@ -95,7 +95,7 @@ can_eliminate_conditional_mod(const intel_device_info *devinfo,
}
bool
brw_opt_dead_code_eliminate(fs_visitor &s)
brw_opt_dead_code_eliminate(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;


@ -66,7 +66,7 @@ is_nop_mov(const brw_inst *inst)
}
static bool
is_coalesce_candidate(const fs_visitor *v, const brw_inst *inst)
is_coalesce_candidate(const brw_shader *v, const brw_inst *inst)
{
if ((inst->opcode != BRW_OPCODE_MOV &&
inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD) ||
@ -192,7 +192,7 @@ can_coalesce_vars(const intel_device_info *devinfo,
* SEND instruction's payload to more than would fit in g112-g127.
*/
static bool
would_violate_eot_restriction(fs_visitor &s,
would_violate_eot_restriction(brw_shader &s,
const cfg_t *cfg,
unsigned dst_reg, unsigned src_reg)
{
@ -224,7 +224,7 @@ would_violate_eot_restriction(fs_visitor &s,
}
bool
brw_opt_register_coalesce(fs_visitor &s)
brw_opt_register_coalesce(brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
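
would_violate_eot_restriction guards the rule quoted in the comment above: the payload of the thread-terminating (EOT) SEND must fit in g112-g127, so coalescing must not grow it past that window. A toy range check under that stated constraint (the constants come straight from the comment; everything else is illustrative):

#include <cstdio>

static const unsigned EOT_FIRST_GRF = 112;
static const unsigned EOT_LAST_GRF = 127;

static bool
payload_fits_eot_window(unsigned first_grf, unsigned len)
{
   return first_grf >= EOT_FIRST_GRF &&
          first_grf + len - 1 <= EOT_LAST_GRF;
}

int main()
{
   printf("%d\n", payload_fits_eot_window(112, 16));   /* 1: g112-g127 */
   printf("%d\n", payload_fits_eot_window(110, 4));    /* 0: starts low */
   printf("%d\n", payload_fits_eot_window(120, 10));   /* 0: past g127 */
   return 0;
}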


@ -86,7 +86,7 @@ propagate_sat(brw_inst *inst, brw_inst *scan_inst)
}
static bool
opt_saturate_propagation_local(fs_visitor &s, bblock_t *block)
opt_saturate_propagation_local(brw_shader &s, bblock_t *block)
{
bool progress = false;
int ip = block->end_ip + 1;
@ -188,7 +188,7 @@ opt_saturate_propagation_local(fs_visitor &s, bblock_t *block)
}
bool
brw_opt_saturate_propagation(fs_visitor &s)
brw_opt_saturate_propagation(brw_shader &s)
{
bool progress = false;


@ -8,7 +8,7 @@
#include "brw_builder.h"
static unsigned
dest_comps_for_txf(const fs_visitor &s, const brw_inst *txf)
dest_comps_for_txf(const brw_shader &s, const brw_inst *txf)
{
if (!txf)
return 0;
@ -78,7 +78,7 @@ sources_match(ASSERTED const brw_def_analysis &defs,
* lower register pressure.
*/
bool
brw_opt_combine_convergent_txf(fs_visitor &s)
brw_opt_combine_convergent_txf(brw_shader &s)
{
const brw_def_analysis &defs = s.def_analysis.require();


@ -20,7 +20,7 @@
* elimination and coalescing.
*/
bool
brw_opt_split_virtual_grfs(fs_visitor &s)
brw_opt_split_virtual_grfs(brw_shader &s)
{
/* Compact the register file so we eliminate dead vgrfs. This
* only defines split points for live registers, so if we have
@ -220,7 +220,7 @@ cleanup:
* overhead.
*/
bool
brw_opt_compact_virtual_grfs(fs_visitor &s)
brw_opt_compact_virtual_grfs(brw_shader &s)
{
bool progress = false;
int *remap_table = new int[s.alloc.count];


@ -11,7 +11,7 @@
#include "util/half_float.h"
void
brw_print_instructions(const fs_visitor &s, FILE *file)
brw_print_instructions(const brw_shader &s, FILE *file)
{
if (s.cfg && s.grf_used == 0) {
const brw_def_analysis &defs = s.def_analysis.require();
@ -372,7 +372,7 @@ print_memory_logical_source(FILE *file, const brw_inst *inst, unsigned i)
}
void
brw_print_instruction(const fs_visitor &s, const brw_inst *inst, FILE *file, const brw_def_analysis *defs)
brw_print_instruction(const brw_shader &s, const brw_inst *inst, FILE *file, const brw_def_analysis *defs)
{
if (inst->predicate) {
fprintf(file, "(%cf%d.%d) ",


@ -43,7 +43,7 @@ assign_reg(const struct intel_device_info *devinfo,
}
void
brw_assign_regs_trivial(fs_visitor &s)
brw_assign_regs_trivial(brw_shader &s)
{
const struct intel_device_info *devinfo = s.devinfo;
unsigned *hw_reg_mapping = ralloc_array(NULL, unsigned, s.alloc.count + 1);
@ -152,7 +152,7 @@ count_to_loop_end(const bblock_t *block)
unreachable("not reached");
}
void fs_visitor::calculate_payload_ranges(bool allow_spilling,
void brw_shader::calculate_payload_ranges(bool allow_spilling,
unsigned payload_node_count,
int *payload_last_use_ip) const
{
@ -236,7 +236,7 @@ void fs_visitor::calculate_payload_ranges(bool allow_spilling,
class brw_reg_alloc {
public:
brw_reg_alloc(fs_visitor *fs):
brw_reg_alloc(brw_shader *fs):
fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
live(fs->live_analysis.require()), g(NULL),
have_spill_costs(false)
@ -308,7 +308,7 @@ private:
void spill_reg(unsigned spill_reg);
void *mem_ctx;
fs_visitor *fs;
brw_shader *fs;
const intel_device_info *devinfo;
const brw_compiler *compiler;
const brw_live_variables &live;
@ -351,7 +351,7 @@ namespace {
* into multiple (force_writemask_all) scratch messages.
*/
unsigned
spill_max_size(const fs_visitor *s)
spill_max_size(const brw_shader *s)
{
/* LSC is limited to SIMD16 sends (SIMD32 on Xe2) */
if (s->devinfo->has_lsc)
@ -1334,7 +1334,7 @@ brw_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
}
bool
brw_assign_regs(fs_visitor &s, bool allow_spilling, bool spill_all)
brw_assign_regs(brw_shader &s, bool allow_spilling, bool spill_all)
{
brw_reg_alloc alloc(&s);
bool success = alloc.assign_regs(allow_spilling, spill_all);


@ -583,7 +583,7 @@ schedule_node::set_latency(const struct brw_isa_info *isa)
class brw_instruction_scheduler {
public:
brw_instruction_scheduler(void *mem_ctx, const fs_visitor *s, int grf_count, int hw_reg_count,
brw_instruction_scheduler(void *mem_ctx, const brw_shader *s, int grf_count, int hw_reg_count,
int block_count, bool post_reg_alloc);
void add_barrier_deps(schedule_node *n);
@ -646,7 +646,7 @@ public:
bool post_reg_alloc;
int grf_count;
const fs_visitor *s;
const brw_shader *s;
/**
* Last instruction to have written the grf (or a channel in the grf, for the
@ -701,7 +701,7 @@ public:
int *hw_reads_remaining;
};
brw_instruction_scheduler::brw_instruction_scheduler(void *mem_ctx, const fs_visitor *s,
brw_instruction_scheduler::brw_instruction_scheduler(void *mem_ctx, const brw_shader *s,
int grf_count, int hw_reg_count,
int block_count, bool post_reg_alloc)
: s(s)
@ -1824,7 +1824,7 @@ brw_instruction_scheduler::run(brw_instruction_scheduler_mode mode)
}
brw_instruction_scheduler *
brw_prepare_scheduler(fs_visitor &s, void *mem_ctx)
brw_prepare_scheduler(brw_shader &s, void *mem_ctx)
{
const int grf_count = s.alloc.count;
@ -1834,7 +1834,7 @@ brw_prepare_scheduler(fs_visitor &s, void *mem_ctx)
}
void
brw_schedule_instructions_pre_ra(fs_visitor &s, brw_instruction_scheduler *sched,
brw_schedule_instructions_pre_ra(brw_shader &s, brw_instruction_scheduler *sched,
brw_instruction_scheduler_mode mode)
{
if (mode == BRW_SCHEDULE_NONE)
@ -1846,7 +1846,7 @@ brw_schedule_instructions_pre_ra(fs_visitor &s, brw_instruction_scheduler *sched
}
void
brw_schedule_instructions_post_ra(fs_visitor &s)
brw_schedule_instructions_post_ra(brw_shader &s)
{
const bool post_reg_alloc = true;
const int grf_count = reg_unit(s.devinfo) * s.grf_used;


@ -21,13 +21,6 @@
* IN THE SOFTWARE.
*/
/** @file
*
* This file drives the GLSL IR -> LIR translation, contains the
* optimizations on the LIR, and drives the generation of native code
* from the LIR.
*/
#include "brw_analysis.h"
#include "brw_eu.h"
#include "brw_shader.h"
@ -45,7 +38,7 @@
#include "util/u_math.h"
void
fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
brw_shader::emit_urb_writes(const brw_reg &gs_vertex_count)
{
int slot, urb_offset, length;
int starting_urb_offset = 0;
@ -335,7 +328,7 @@ fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
}
void
fs_visitor::emit_cs_terminate()
brw_shader::emit_cs_terminate()
{
const brw_builder ubld = brw_builder(this).at_end().exec_all();
@ -378,7 +371,7 @@ fs_visitor::emit_cs_terminate()
send->eot = true;
}
fs_visitor::fs_visitor(const struct brw_compiler *compiler,
brw_shader::brw_shader(const struct brw_compiler *compiler,
const struct brw_compile_params *params,
const brw_base_prog_key *key,
struct brw_stage_prog_data *prog_data,
@ -402,7 +395,7 @@ fs_visitor::fs_visitor(const struct brw_compiler *compiler,
init();
}
fs_visitor::fs_visitor(const struct brw_compiler *compiler,
brw_shader::brw_shader(const struct brw_compiler *compiler,
const struct brw_compile_params *params,
const brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
@ -431,7 +424,7 @@ fs_visitor::fs_visitor(const struct brw_compiler *compiler,
}
void
fs_visitor::init()
brw_shader::init()
{
this->max_dispatch_width = 32;
@ -462,13 +455,13 @@ fs_visitor::init()
this->gs.control_data_header_size_bits = 0;
}
fs_visitor::~fs_visitor()
brw_shader::~brw_shader()
{
delete this->payload_;
}
void
fs_visitor::vfail(const char *format, va_list va)
brw_shader::vfail(const char *format, va_list va)
{
char *msg;
@ -489,7 +482,7 @@ fs_visitor::vfail(const char *format, va_list va)
}
void
fs_visitor::fail(const char *format, ...)
brw_shader::fail(const char *format, ...)
{
va_list va;
@ -510,7 +503,7 @@ fs_visitor::fail(const char *format, ...)
* this just calls fail().
*/
void
fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
brw_shader::limit_dispatch_width(unsigned n, const char *msg)
{
if (dispatch_width > n) {
fail("%s", msg);
@ -526,7 +519,7 @@ fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
* This brings in those uniform definitions
*/
void
fs_visitor::import_uniforms(fs_visitor *v)
brw_shader::import_uniforms(brw_shader *v)
{
this->uniforms = v->uniforms;
}
@ -577,7 +570,7 @@ brw_barycentric_mode(const struct brw_wm_prog_key *key,
* Return true if successful or false if a separate EOT write is needed.
*/
bool
fs_visitor::mark_last_urb_write_with_eot()
brw_shader::mark_last_urb_write_with_eot()
{
foreach_in_list_reverse(brw_inst, prev, &this->instructions) {
if (prev->opcode == SHADER_OPCODE_URB_WRITE_LOGICAL) {
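
Per the comment above, mark_last_urb_write_with_eot walks the instruction list backwards and tries to fold the end-of-thread flag into the final URB write, returning false when the caller must emit a separate EOT write instead. A toy version of that reverse scan (simplified: the real code checks more conditions before setting eot):

#include <cstdio>
#include <vector>

enum toy_op { OP_URB_WRITE, OP_OTHER };

struct toy_inst {
   toy_op op;
   bool eot;
};

static bool
mark_last_urb_write_with_eot(std::vector<toy_inst> &prog)
{
   for (auto it = prog.rbegin(); it != prog.rend(); ++it) {
      if (it->op == OP_URB_WRITE) {
         it->eot = true;   /* piggy-back end-of-thread on this write */
         return true;
      }
   }
   return false;   /* no suitable write: emit a separate EOT instead */
}

int main()
{
   std::vector<toy_inst> prog = {
      {OP_OTHER, false}, {OP_URB_WRITE, false}, {OP_OTHER, false},
   };
   printf("%d eot=%d\n", mark_last_urb_write_with_eot(prog), prog[1].eot);
   return 0;
}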
@ -606,7 +599,7 @@ round_components_to_whole_registers(const intel_device_info *devinfo,
}
void
fs_visitor::assign_curb_setup()
brw_shader::assign_curb_setup()
{
unsigned uniform_push_length =
round_components_to_whole_registers(devinfo, prog_data->nr_params);
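
assign_curb_setup rounds the push-constant component count up to whole registers, since uniforms are uploaded a full GRF at a time. A worked version of that rounding under assumed sizes (32-byte GRFs holding 8 dword components; the actual figure depends on devinfo):

#include <cstdio>

static unsigned
round_components_to_whole_registers(unsigned components,
                                    unsigned comps_per_reg)
{
   return (components + comps_per_reg - 1) / comps_per_reg * comps_per_reg;
}

int main()
{
   /* 8 dword components per 32-byte GRF (assumed). */
   const unsigned comps_per_reg = 8;

   /* 18 uniform components round up to 24, i.e. 3 whole registers. */
   unsigned rounded = round_components_to_whole_registers(18, comps_per_reg);
   printf("%u components -> %u registers\n", rounded, rounded / comps_per_reg);
   return 0;
}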
@ -845,7 +838,7 @@ brw_compute_urb_setup_index(struct brw_wm_prog_data *wm_prog_data)
}
void
fs_visitor::convert_attr_sources_to_hw_regs(brw_inst *inst)
brw_shader::convert_attr_sources_to_hw_regs(brw_inst *inst)
{
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == ATTR) {
@ -935,7 +928,7 @@ brw_fb_write_msg_control(const brw_inst *inst,
}
void
fs_visitor::invalidate_analysis(brw_analysis_dependency_class c)
brw_shader::invalidate_analysis(brw_analysis_dependency_class c)
{
live_analysis.invalidate(c);
regpressure_analysis.invalidate(c);
@ -945,7 +938,7 @@ fs_visitor::invalidate_analysis(brw_analysis_dependency_class c)
}
void
fs_visitor::debug_optimizer(const nir_shader *nir,
brw_shader::debug_optimizer(const nir_shader *nir,
const char *pass_name,
int iteration, int pass_num) const
{
@ -976,7 +969,7 @@ fs_visitor::debug_optimizer(const nir_shader *nir,
}
static uint32_t
brw_compute_max_register_pressure(fs_visitor &s)
brw_compute_max_register_pressure(brw_shader &s)
{
const brw_register_pressure &rp = s.regpressure_analysis.require();
uint32_t ip = 0, max_pressure = 0;
@ -1031,7 +1024,7 @@ brw_get_scratch_size(int size)
}
void
brw_allocate_registers(fs_visitor &s, bool allow_spilling)
brw_allocate_registers(brw_shader &s, bool allow_spilling)
{
const struct intel_device_info *devinfo = s.devinfo;
const nir_shader *nir = s.nir;
@ -1249,7 +1242,7 @@ brw_cs_get_dispatch_info(const struct intel_device_info *devinfo,
}
void
brw_shader_phase_update(fs_visitor &s, enum brw_shader_phase phase)
brw_shader_phase_update(brw_shader &s, enum brw_shader_phase phase)
{
assert(phase == s.phase + 1);
s.phase = phase;
@ -1262,7 +1255,7 @@ bool brw_should_print_shader(const nir_shader *shader, uint64_t debug_flag)
}
static unsigned
brw_allocate_vgrf_number(fs_visitor &s, unsigned size_in_REGSIZE_units)
brw_allocate_vgrf_number(brw_shader &s, unsigned size_in_REGSIZE_units)
{
assert(size_in_REGSIZE_units > 0);
@ -1279,7 +1272,7 @@ brw_allocate_vgrf_number(fs_visitor &s, unsigned size_in_REGSIZE_units)
}
brw_reg
brw_allocate_vgrf(fs_visitor &s, brw_reg_type type, unsigned count)
brw_allocate_vgrf(brw_shader &s, brw_reg_type type, unsigned count)
{
const unsigned unit = reg_unit(s.devinfo);
const unsigned size = DIV_ROUND_UP(count * brw_type_size_bytes(type),
@ -1288,7 +1281,7 @@ brw_allocate_vgrf(fs_visitor &s, brw_reg_type type, unsigned count)
}
brw_reg
brw_allocate_vgrf_units(fs_visitor &s, unsigned units_of_REGSIZE)
brw_allocate_vgrf_units(brw_shader &s, unsigned units_of_REGSIZE)
{
return brw_vgrf(brw_allocate_vgrf_number(s, units_of_REGSIZE), BRW_TYPE_UD);
}
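
The sizing math in brw_allocate_vgrf above converts a (type, count) pair into allocation units: ceil(count * type_size / (reg_unit * REG_SIZE)). A worked example under assumed sizes (REG_SIZE = 32 bytes, reg_unit = 1; Xe2-class parts use a larger reg_unit):

#include <cstdio>

static unsigned
div_round_up(unsigned a, unsigned b)
{
   return (a + b - 1) / b;
}

int main()
{
   const unsigned REG_SIZE = 32;   /* bytes per GRF (assumed) */
   const unsigned unit = 1;        /* reg_unit(devinfo) (assumed) */

   /* 16 x 32-bit components = 64 bytes -> 2 allocation units. */
   printf("%u\n", div_round_up(16 * 4, unit * REG_SIZE));

   /* 3 x 64-bit components = 24 bytes -> still 1 whole unit. */
   printf("%u\n", div_round_up(3 * 8, unit * REG_SIZE));
   return 0;
}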


@ -59,15 +59,10 @@ enum brw_shader_phase {
BRW_SHADER_PHASE_INVALID,
};
/**
* The fragment shader front-end.
*
* Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
*/
struct fs_visitor
struct brw_shader
{
public:
fs_visitor(const struct brw_compiler *compiler,
brw_shader(const struct brw_compiler *compiler,
const struct brw_compile_params *params,
const brw_base_prog_key *key,
struct brw_stage_prog_data *prog_data,
@ -75,7 +70,7 @@ public:
unsigned dispatch_width,
bool needs_register_pressure,
bool debug_enabled);
fs_visitor(const struct brw_compiler *compiler,
brw_shader(const struct brw_compiler *compiler,
const struct brw_compile_params *params,
const brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
@ -85,9 +80,9 @@ public:
bool needs_register_pressure,
bool debug_enabled);
void init();
~fs_visitor();
~brw_shader();
void import_uniforms(fs_visitor *v);
void import_uniforms(brw_shader *v);
void assign_curb_setup();
void convert_attr_sources_to_hw_regs(brw_inst *inst);
@ -139,11 +134,11 @@ public:
struct brw_stage_prog_data *prog_data;
brw_analysis<brw_live_variables, fs_visitor> live_analysis;
brw_analysis<brw_register_pressure, fs_visitor> regpressure_analysis;
brw_analysis<brw_performance, fs_visitor> performance_analysis;
brw_analysis<brw_idom_tree, fs_visitor> idom_analysis;
brw_analysis<brw_def_analysis, fs_visitor> def_analysis;
brw_analysis<brw_live_variables, brw_shader> live_analysis;
brw_analysis<brw_register_pressure, brw_shader> regpressure_analysis;
brw_analysis<brw_performance, brw_shader> performance_analysis;
brw_analysis<brw_idom_tree, brw_shader> idom_analysis;
brw_analysis<brw_def_analysis, brw_shader> def_analysis;
/** Number of uniform variable components visited. */
unsigned uniforms;
@ -226,9 +221,9 @@ public:
int iteration, int pass_num) const;
};
void brw_print_instructions(const fs_visitor &s, FILE *file = stderr);
void brw_print_instructions(const brw_shader &s, FILE *file = stderr);
void brw_print_instruction(const fs_visitor &s, const brw_inst *inst,
void brw_print_instruction(const brw_shader &s, const brw_inst *inst,
FILE *file = stderr,
const brw_def_analysis *defs = nullptr);
@ -240,7 +235,7 @@ void brw_print_swsb(FILE *f, const struct intel_device_info *devinfo, const tgl_
* dispatch mode.
*/
static inline unsigned
sample_mask_flag_subreg(const fs_visitor &s)
sample_mask_flag_subreg(const brw_shader &s)
{
assert(s.stage == MESA_SHADER_FRAGMENT);
return 2;
@ -263,19 +258,19 @@ void brw_compute_urb_setup_index(struct brw_wm_prog_data *wm_prog_data);
int brw_get_subgroup_id_param_index(const intel_device_info *devinfo,
const brw_stage_prog_data *prog_data);
void brw_from_nir(fs_visitor *s);
void brw_from_nir(brw_shader *s);
void brw_shader_phase_update(fs_visitor &s, enum brw_shader_phase phase);
void brw_shader_phase_update(brw_shader &s, enum brw_shader_phase phase);
#ifndef NDEBUG
void brw_validate(const fs_visitor &s);
void brw_validate(const brw_shader &s);
#else
static inline void brw_validate(const fs_visitor &s) {}
static inline void brw_validate(const brw_shader &s) {}
#endif
void brw_calculate_cfg(fs_visitor &s);
void brw_calculate_cfg(brw_shader &s);
void brw_optimize(fs_visitor &s);
void brw_optimize(brw_shader &s);
enum brw_instruction_scheduler_mode {
BRW_SCHEDULE_PRE,
@ -287,73 +282,73 @@ enum brw_instruction_scheduler_mode {
class brw_instruction_scheduler;
brw_instruction_scheduler *brw_prepare_scheduler(fs_visitor &s, void *mem_ctx);
void brw_schedule_instructions_pre_ra(fs_visitor &s, brw_instruction_scheduler *sched,
brw_instruction_scheduler *brw_prepare_scheduler(brw_shader &s, void *mem_ctx);
void brw_schedule_instructions_pre_ra(brw_shader &s, brw_instruction_scheduler *sched,
brw_instruction_scheduler_mode mode);
void brw_schedule_instructions_post_ra(fs_visitor &s);
void brw_schedule_instructions_post_ra(brw_shader &s);
void brw_allocate_registers(fs_visitor &s, bool allow_spilling);
bool brw_assign_regs(fs_visitor &s, bool allow_spilling, bool spill_all);
void brw_assign_regs_trivial(fs_visitor &s);
void brw_allocate_registers(brw_shader &s, bool allow_spilling);
bool brw_assign_regs(brw_shader &s, bool allow_spilling, bool spill_all);
void brw_assign_regs_trivial(brw_shader &s);
bool brw_lower_3src_null_dest(fs_visitor &s);
bool brw_lower_alu_restrictions(fs_visitor &s);
bool brw_lower_barycentrics(fs_visitor &s);
bool brw_lower_constant_loads(fs_visitor &s);
bool brw_lower_csel(fs_visitor &s);
bool brw_lower_derivatives(fs_visitor &s);
bool brw_lower_dpas(fs_visitor &s);
bool brw_lower_find_live_channel(fs_visitor &s);
bool brw_lower_indirect_mov(fs_visitor &s);
bool brw_lower_integer_multiplication(fs_visitor &s);
bool brw_lower_load_payload(fs_visitor &s);
bool brw_lower_load_subgroup_invocation(fs_visitor &s);
bool brw_lower_logical_sends(fs_visitor &s);
bool brw_lower_pack(fs_visitor &s);
bool brw_lower_regioning(fs_visitor &s);
bool brw_lower_scalar_fp64_MAD(fs_visitor &s);
bool brw_lower_scoreboard(fs_visitor &s);
bool brw_lower_send_descriptors(fs_visitor &s);
bool brw_lower_send_gather(fs_visitor &s);
bool brw_lower_sends_overlapping_payload(fs_visitor &s);
bool brw_lower_simd_width(fs_visitor &s);
bool brw_lower_src_modifiers(fs_visitor &s, bblock_t *block, brw_inst *inst, unsigned i);
bool brw_lower_sub_sat(fs_visitor &s);
bool brw_lower_subgroup_ops(fs_visitor &s);
bool brw_lower_uniform_pull_constant_loads(fs_visitor &s);
void brw_lower_vgrfs_to_fixed_grfs(fs_visitor &s);
bool brw_lower_3src_null_dest(brw_shader &s);
bool brw_lower_alu_restrictions(brw_shader &s);
bool brw_lower_barycentrics(brw_shader &s);
bool brw_lower_constant_loads(brw_shader &s);
bool brw_lower_csel(brw_shader &s);
bool brw_lower_derivatives(brw_shader &s);
bool brw_lower_dpas(brw_shader &s);
bool brw_lower_find_live_channel(brw_shader &s);
bool brw_lower_indirect_mov(brw_shader &s);
bool brw_lower_integer_multiplication(brw_shader &s);
bool brw_lower_load_payload(brw_shader &s);
bool brw_lower_load_subgroup_invocation(brw_shader &s);
bool brw_lower_logical_sends(brw_shader &s);
bool brw_lower_pack(brw_shader &s);
bool brw_lower_regioning(brw_shader &s);
bool brw_lower_scalar_fp64_MAD(brw_shader &s);
bool brw_lower_scoreboard(brw_shader &s);
bool brw_lower_send_descriptors(brw_shader &s);
bool brw_lower_send_gather(brw_shader &s);
bool brw_lower_sends_overlapping_payload(brw_shader &s);
bool brw_lower_simd_width(brw_shader &s);
bool brw_lower_src_modifiers(brw_shader &s, bblock_t *block, brw_inst *inst, unsigned i);
bool brw_lower_sub_sat(brw_shader &s);
bool brw_lower_subgroup_ops(brw_shader &s);
bool brw_lower_uniform_pull_constant_loads(brw_shader &s);
void brw_lower_vgrfs_to_fixed_grfs(brw_shader &s);
bool brw_opt_address_reg_load(fs_visitor &s);
bool brw_opt_algebraic(fs_visitor &s);
bool brw_opt_bank_conflicts(fs_visitor &s);
bool brw_opt_cmod_propagation(fs_visitor &s);
bool brw_opt_combine_constants(fs_visitor &s);
bool brw_opt_combine_convergent_txf(fs_visitor &s);
bool brw_opt_compact_virtual_grfs(fs_visitor &s);
bool brw_opt_address_reg_load(brw_shader &s);
bool brw_opt_algebraic(brw_shader &s);
bool brw_opt_bank_conflicts(brw_shader &s);
bool brw_opt_cmod_propagation(brw_shader &s);
bool brw_opt_combine_constants(brw_shader &s);
bool brw_opt_combine_convergent_txf(brw_shader &s);
bool brw_opt_compact_virtual_grfs(brw_shader &s);
bool brw_opt_constant_fold_instruction(const intel_device_info *devinfo, brw_inst *inst);
bool brw_opt_copy_propagation(fs_visitor &s);
bool brw_opt_copy_propagation_defs(fs_visitor &s);
bool brw_opt_cse_defs(fs_visitor &s);
bool brw_opt_dead_code_eliminate(fs_visitor &s);
bool brw_opt_eliminate_find_live_channel(fs_visitor &s);
bool brw_opt_register_coalesce(fs_visitor &s);
bool brw_opt_remove_extra_rounding_modes(fs_visitor &s);
bool brw_opt_remove_redundant_halts(fs_visitor &s);
bool brw_opt_saturate_propagation(fs_visitor &s);
bool brw_opt_send_gather_to_send(fs_visitor &s);
bool brw_opt_send_to_send_gather(fs_visitor &s);
bool brw_opt_split_sends(fs_visitor &s);
bool brw_opt_split_virtual_grfs(fs_visitor &s);
bool brw_opt_zero_samples(fs_visitor &s);
bool brw_opt_copy_propagation(brw_shader &s);
bool brw_opt_copy_propagation_defs(brw_shader &s);
bool brw_opt_cse_defs(brw_shader &s);
bool brw_opt_dead_code_eliminate(brw_shader &s);
bool brw_opt_eliminate_find_live_channel(brw_shader &s);
bool brw_opt_register_coalesce(brw_shader &s);
bool brw_opt_remove_extra_rounding_modes(brw_shader &s);
bool brw_opt_remove_redundant_halts(brw_shader &s);
bool brw_opt_saturate_propagation(brw_shader &s);
bool brw_opt_send_gather_to_send(brw_shader &s);
bool brw_opt_send_to_send_gather(brw_shader &s);
bool brw_opt_split_sends(brw_shader &s);
bool brw_opt_split_virtual_grfs(brw_shader &s);
bool brw_opt_zero_samples(brw_shader &s);
bool brw_workaround_emit_dummy_mov_instruction(fs_visitor &s);
bool brw_workaround_memory_fence_before_eot(fs_visitor &s);
bool brw_workaround_nomask_control_flow(fs_visitor &s);
bool brw_workaround_source_arf_before_eot(fs_visitor &s);
bool brw_workaround_emit_dummy_mov_instruction(brw_shader &s);
bool brw_workaround_memory_fence_before_eot(brw_shader &s);
bool brw_workaround_nomask_control_flow(brw_shader &s);
bool brw_workaround_source_arf_before_eot(brw_shader &s);
/* Helpers. */
unsigned brw_get_lowered_simd_width(const fs_visitor *shader,
unsigned brw_get_lowered_simd_width(const brw_shader *shader,
const brw_inst *inst);
brw_reg brw_allocate_vgrf(fs_visitor &s, brw_reg_type type, unsigned count);
brw_reg brw_allocate_vgrf_units(fs_visitor &s, unsigned units_of_REGSIZE);
brw_reg brw_allocate_vgrf(brw_shader &s, brw_reg_type type, unsigned count);
brw_reg brw_allocate_vgrf_units(brw_shader &s, unsigned units_of_REGSIZE);


@ -24,7 +24,7 @@
#include "brw_shader.h"
#include "brw_builder.h"
brw_vs_thread_payload::brw_vs_thread_payload(const fs_visitor &v)
brw_vs_thread_payload::brw_vs_thread_payload(const brw_shader &v)
{
unsigned r = 0;
@ -38,7 +38,7 @@ brw_vs_thread_payload::brw_vs_thread_payload(const fs_visitor &v)
num_regs = r;
}
brw_tcs_thread_payload::brw_tcs_thread_payload(const fs_visitor &v)
brw_tcs_thread_payload::brw_tcs_thread_payload(const brw_shader &v)
{
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(v.prog_data);
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(v.prog_data);
@ -76,7 +76,7 @@ brw_tcs_thread_payload::brw_tcs_thread_payload(const fs_visitor &v)
}
}
brw_tes_thread_payload::brw_tes_thread_payload(const fs_visitor &v)
brw_tes_thread_payload::brw_tes_thread_payload(const brw_shader &v)
{
unsigned r = 0;
@ -98,7 +98,7 @@ brw_tes_thread_payload::brw_tes_thread_payload(const fs_visitor &v)
num_regs = r;
}
brw_gs_thread_payload::brw_gs_thread_payload(fs_visitor &v)
brw_gs_thread_payload::brw_gs_thread_payload(brw_shader &v)
{
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(v.prog_data);
struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(v.prog_data);
@ -155,7 +155,7 @@ brw_gs_thread_payload::brw_gs_thread_payload(fs_visitor &v)
static inline void
setup_fs_payload_gfx20(brw_fs_thread_payload &payload,
const fs_visitor &v,
const brw_shader &v,
bool &source_depth_to_render_target)
{
struct brw_wm_prog_data *prog_data = brw_wm_prog_data(v.prog_data);
@ -244,7 +244,7 @@ setup_fs_payload_gfx20(brw_fs_thread_payload &payload,
static inline void
setup_fs_payload_gfx9(brw_fs_thread_payload &payload,
const fs_visitor &v,
const brw_shader &v,
bool &source_depth_to_render_target)
{
struct brw_wm_prog_data *prog_data = brw_wm_prog_data(v.prog_data);
@ -332,7 +332,7 @@ setup_fs_payload_gfx9(brw_fs_thread_payload &payload,
}
}
brw_fs_thread_payload::brw_fs_thread_payload(const fs_visitor &v,
brw_fs_thread_payload::brw_fs_thread_payload(const brw_shader &v,
bool &source_depth_to_render_target)
: subspan_coord_reg(),
source_depth_reg(),
@ -352,7 +352,7 @@ brw_fs_thread_payload::brw_fs_thread_payload(const fs_visitor &v,
setup_fs_payload_gfx9(*this, v, source_depth_to_render_target);
}
brw_cs_thread_payload::brw_cs_thread_payload(const fs_visitor &v)
brw_cs_thread_payload::brw_cs_thread_payload(const brw_shader &v)
{
struct brw_cs_prog_data *prog_data = brw_cs_prog_data(v.prog_data);
@ -411,7 +411,7 @@ brw_cs_thread_payload::load_subgroup_id(const brw_builder &bld,
}
}
brw_task_mesh_thread_payload::brw_task_mesh_thread_payload(fs_visitor &v)
brw_task_mesh_thread_payload::brw_task_mesh_thread_payload(brw_shader &v)
: brw_cs_thread_payload(v)
{
/* Task and Mesh Shader Payloads (SIMD8 and SIMD16)
@ -475,7 +475,7 @@ brw_task_mesh_thread_payload::brw_task_mesh_thread_payload(fs_visitor &v)
num_regs = r;
}
brw_bs_thread_payload::brw_bs_thread_payload(const fs_visitor &v)
brw_bs_thread_payload::brw_bs_thread_payload(const brw_shader &v)
{
struct brw_bs_prog_data *prog_data = brw_bs_prog_data(v.prog_data);


@ -7,7 +7,7 @@
#include "brw_reg.h"
struct fs_visitor;
struct brw_shader;
class brw_builder;
struct brw_thread_payload {
@ -21,13 +21,13 @@ protected:
};
struct brw_vs_thread_payload : public brw_thread_payload {
brw_vs_thread_payload(const fs_visitor &v);
brw_vs_thread_payload(const brw_shader &v);
brw_reg urb_handles;
};
struct brw_tcs_thread_payload : public brw_thread_payload {
brw_tcs_thread_payload(const fs_visitor &v);
brw_tcs_thread_payload(const brw_shader &v);
brw_reg patch_urb_output;
brw_reg primitive_id;
@ -35,7 +35,7 @@ struct brw_tcs_thread_payload : public brw_thread_payload {
};
struct brw_tes_thread_payload : public brw_thread_payload {
brw_tes_thread_payload(const fs_visitor &v);
brw_tes_thread_payload(const brw_shader &v);
brw_reg patch_urb_input;
brw_reg primitive_id;
@ -44,7 +44,7 @@ struct brw_tes_thread_payload : public brw_thread_payload {
};
struct brw_gs_thread_payload : public brw_thread_payload {
brw_gs_thread_payload(fs_visitor &v);
brw_gs_thread_payload(brw_shader &v);
brw_reg urb_handles;
brw_reg primitive_id;
@ -53,7 +53,7 @@ struct brw_gs_thread_payload : public brw_thread_payload {
};
struct brw_fs_thread_payload : public brw_thread_payload {
brw_fs_thread_payload(const fs_visitor &v,
brw_fs_thread_payload(const brw_shader &v,
bool &source_depth_to_render_target);
uint8_t subspan_coord_reg[2];
@ -71,7 +71,7 @@ struct brw_fs_thread_payload : public brw_thread_payload {
};
struct brw_cs_thread_payload : public brw_thread_payload {
brw_cs_thread_payload(const fs_visitor &v);
brw_cs_thread_payload(const brw_shader &v);
void load_subgroup_id(const brw_builder &bld, brw_reg &dest) const;
@ -84,7 +84,7 @@ protected:
};
struct brw_task_mesh_thread_payload : public brw_cs_thread_payload {
brw_task_mesh_thread_payload(fs_visitor &v);
brw_task_mesh_thread_payload(brw_shader &v);
brw_reg extended_parameter_0;
brw_reg local_index;
@ -96,7 +96,7 @@ struct brw_task_mesh_thread_payload : public brw_cs_thread_payload {
};
struct brw_bs_thread_payload : public brw_thread_payload {
brw_bs_thread_payload(const fs_visitor &v);
brw_bs_thread_payload(const brw_shader &v);
brw_reg inline_parameter;


@ -95,7 +95,7 @@ is_ud_imm(const brw_reg &reg)
}
static void
validate_memory_logical(const fs_visitor &s, const brw_inst *inst)
validate_memory_logical(const brw_shader &s, const brw_inst *inst)
{
const intel_device_info *devinfo = s.devinfo;
@ -200,7 +200,7 @@ brw_shader_phase_to_string(enum brw_shader_phase phase)
}
static void
brw_validate_instruction_phase(const fs_visitor &s, brw_inst *inst)
brw_validate_instruction_phase(const brw_shader &s, brw_inst *inst)
{
enum brw_shader_phase invalid_from = BRW_SHADER_PHASE_INVALID;
@ -271,7 +271,7 @@ brw_validate_instruction_phase(const fs_visitor &s, brw_inst *inst)
}
void
brw_validate(const fs_visitor &s)
brw_validate(const brw_shader &s)
{
const intel_device_info *devinfo = s.devinfo;
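
brw_validate_instruction_phase pairs with brw_shader_phase_update shown earlier: phases advance one step at a time, and each instruction kind has a phase after which it must no longer appear (the invalid_from value above). A toy sketch of both checks (illustrative phase names, not the full real enum):

#include <cassert>
#include <cstdio>

enum toy_phase {
   PHASE_INITIAL,
   PHASE_AFTER_NIR,
   PHASE_AFTER_LOWER_LOGICAL_SENDS,
   PHASE_INVALID,
};

struct toy_shader {
   toy_phase phase = PHASE_INITIAL;
};

/* Mirrors the assert in brw_shader_phase_update: phases only ever
 * advance, one step at a time. */
static void
phase_update(toy_shader &s, toy_phase phase)
{
   assert(phase == s.phase + 1);
   s.phase = phase;
}

struct toy_inst {
   toy_phase invalid_from;   /* phase at which this opcode must be gone */
};

static void
validate_instruction_phase(const toy_shader &s, const toy_inst &inst)
{
   assert(s.phase < inst.invalid_from);
}

int main()
{
   toy_shader s;
   phase_update(s, PHASE_AFTER_NIR);

   toy_inst logical_send = {PHASE_AFTER_LOWER_LOGICAL_SENDS};
   validate_instruction_phase(s, logical_send);   /* still legal here */
   printf("ok\n");
   return 0;
}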


@ -12,7 +12,7 @@
* Make sure this happens by introducing a dummy mov instruction.
*/
bool
brw_workaround_emit_dummy_mov_instruction(fs_visitor &s)
brw_workaround_emit_dummy_mov_instruction(brw_shader &s)
{
if (!intel_needs_workaround(s.devinfo, 14015360517))
return false;
@ -81,7 +81,7 @@ needs_dummy_fence(const intel_device_info *devinfo, brw_inst *inst)
* We probably need a better criteria in needs_dummy_fence().
*/
bool
brw_workaround_memory_fence_before_eot(fs_visitor &s)
brw_workaround_memory_fence_before_eot(brw_shader &s)
{
bool progress = false;
bool has_ugm_write_or_atomic = false;
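
brw_workaround_memory_fence_before_eot tracks has_ugm_write_or_atomic and, per its comment, makes sure such writes are flushed before the thread terminates by placing a dummy fence ahead of the EOT send. A toy shape of that scan (stand-in opcodes, not the real IR or needs_dummy_fence() criteria):

#include <cstdio>
#include <vector>

enum toy_op { OP_ALU, OP_UGM_WRITE, OP_FENCE, OP_SEND_EOT };

static bool
fence_before_eot(std::vector<toy_op> &prog)
{
   bool has_ugm_write_or_atomic = false;

   for (size_t i = 0; i < prog.size(); i++) {
      if (prog[i] == OP_UGM_WRITE)
         has_ugm_write_or_atomic = true;

      if (prog[i] == OP_SEND_EOT && has_ugm_write_or_atomic) {
         /* Slip a dummy fence in front of the terminating send. */
         prog.insert(prog.begin() + i, OP_FENCE);
         return true;
      }
   }
   return false;
}

int main()
{
   std::vector<toy_op> prog = {OP_ALU, OP_UGM_WRITE, OP_ALU, OP_SEND_EOT};
   printf("progress=%d len=%zu\n", fence_before_eot(prog), prog.size());
   return 0;
}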
@ -131,7 +131,7 @@ brw_workaround_memory_fence_before_eot(fs_visitor &s)
* the only SHADER_OPCODE_HALT_TARGET in the program.
*/
static const brw_inst *
find_halt_control_flow_region_start(const fs_visitor *v)
find_halt_control_flow_region_start(const brw_shader *v)
{
foreach_block_and_inst(block, brw_inst, inst, v->cfg) {
if (inst->opcode == BRW_OPCODE_HALT ||
@ -155,7 +155,7 @@ find_halt_control_flow_region_start(const fs_visitor *v)
* all channels of the program are disabled.
*/
bool
brw_workaround_nomask_control_flow(fs_visitor &s)
brw_workaround_nomask_control_flow(brw_shader &s)
{
if (s.devinfo->ver != 12)
return false;
@ -299,7 +299,7 @@ bytes_bitmask_to_words(unsigned b)
* accessed inside the next blocks, but this still should be good enough.
*/
bool
brw_workaround_source_arf_before_eot(fs_visitor &s)
brw_workaround_source_arf_before_eot(brw_shader &s)
{
bool progress = false;


@ -37,7 +37,7 @@ protected:
void *ctx;
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
brw_shader *v;
brw_builder bld;
};
@ -60,7 +60,7 @@ scoreboard_test::scoreboard_test()
nir_shader *shader =
nir_shader_create(ctx, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader, 8,
v = new brw_shader(compiler, &params, NULL, &prog_data->base, shader, 8,
false, false);
bld = brw_builder(v).at_end();
@ -86,7 +86,7 @@ instruction(bblock_t *block, int num)
}
static void
lower_scoreboard(fs_visitor *v)
lower_scoreboard(brw_shader *v)
{
const bool print = getenv("TEST_DEBUG");


@ -37,7 +37,7 @@ protected:
void *ctx;
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
brw_shader *v;
brw_builder bld;
void test_mov_prop(enum brw_conditional_mod cmod,
@ -67,7 +67,7 @@ cmod_propagation_test::cmod_propagation_test()
nir_shader *shader =
nir_shader_create(ctx, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
v = new brw_shader(compiler, &params, NULL, &prog_data->base, shader,
8, false, false);
bld = brw_builder(v).at_end();
@ -96,7 +96,7 @@ instruction(bblock_t *block, int num)
}
static bool
cmod_propagation(fs_visitor *v)
cmod_propagation(brw_shader *v)
{
const bool print = getenv("TEST_DEBUG");


@ -27,7 +27,7 @@ struct FSCombineConstantsTest : public ::testing::Test {
nir_shader *nir =
nir_shader_create(mem_ctx, MESA_SHADER_COMPUTE, NULL, NULL);
shader = new fs_visitor(&compiler, &params, NULL,
shader = new brw_shader(&compiler, &params, NULL,
&prog_data.base, nir, 8, false, false);
}
@ -44,9 +44,9 @@ struct FSCombineConstantsTest : public ::testing::Test {
struct brw_wm_prog_data prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *shader;
brw_shader *shader;
bool opt_combine_constants(fs_visitor *s) {
bool opt_combine_constants(brw_shader *s) {
const bool print = getenv("TEST_DEBUG");
if (print) {
@ -66,7 +66,7 @@ struct FSCombineConstantsTest : public ::testing::Test {
};
static brw_builder
make_builder(fs_visitor *s)
make_builder(brw_shader *s)
{
return brw_builder(s, s->dispatch_width).at_end();
}


@ -37,7 +37,7 @@ protected:
void *ctx;
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
brw_shader *v;
brw_builder bld;
};
@ -56,7 +56,7 @@ copy_propagation_test::copy_propagation_test()
nir_shader *shader =
nir_shader_create(ctx, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
v = new brw_shader(compiler, &params, NULL, &prog_data->base, shader,
8, false, false);
bld = brw_builder(v).at_end();
@ -85,7 +85,7 @@ instruction(bblock_t *block, int num)
}
static bool
copy_propagation(fs_visitor *v)
copy_propagation(brw_shader *v)
{
const bool print = getenv("TEST_DEBUG");


@ -19,7 +19,7 @@ protected:
void *ctx;
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
brw_shader *v;
brw_builder bld;
};
@ -38,7 +38,7 @@ cse_test::cse_test()
nir_shader *shader =
nir_shader_create(ctx, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
v = new brw_shader(compiler, &params, NULL, &prog_data->base, shader,
16, false, false);
bld = brw_builder(v).at_end();
@ -68,7 +68,7 @@ instruction(bblock_t *block, int num)
}
static bool
cse(fs_visitor *v)
cse(brw_shader *v)
{
const bool print = false;


@ -37,7 +37,7 @@ protected:
void *ctx;
struct brw_wm_prog_data *prog_data;
struct gl_shader_program *shader_prog;
fs_visitor *v;
brw_shader *v;
brw_builder bld;
};
@ -56,7 +56,7 @@ saturate_propagation_test::saturate_propagation_test()
nir_shader *shader =
nir_shader_create(ctx, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new fs_visitor(compiler, &params, NULL, &prog_data->base, shader,
v = new brw_shader(compiler, &params, NULL, &prog_data->base, shader,
16, false, false);
bld = brw_builder(v).at_end();
@ -86,7 +86,7 @@ instruction(bblock_t *block, int num)
}
static bool
saturate_propagation(fs_visitor *v)
saturate_propagation(brw_shader *v)
{
const bool print = false;