brw: rename brw_sometimes to intel_sometimes

Move it to intel_shader_enums.h.

The plan is to make it visible to OpenCL shaders.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Tapani Pälli <tapani.palli@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32329>
This commit is contained in:
Lionel Landwerlin 2024-11-18 10:58:46 +02:00 committed by Marge Bot
parent 9016a5458a
commit bfcb9bf276
12 changed files with 104 additions and 104 deletions

View file

@ -563,10 +563,10 @@ iris_to_brw_fs_key(const struct iris_screen *screen,
.nr_color_regions = key->nr_color_regions,
.flat_shade = key->flat_shade,
.alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
.alpha_to_coverage = key->alpha_to_coverage ? BRW_ALWAYS : BRW_NEVER,
.alpha_to_coverage = key->alpha_to_coverage ? INTEL_ALWAYS : INTEL_NEVER,
.clamp_fragment_color = key->clamp_fragment_color,
.persample_interp = key->persample_interp ? BRW_ALWAYS : BRW_NEVER,
.multisample_fbo = key->multisample_fbo ? BRW_ALWAYS : BRW_NEVER,
.persample_interp = key->persample_interp ? INTEL_ALWAYS : INTEL_NEVER,
.multisample_fbo = key->multisample_fbo ? INTEL_ALWAYS : INTEL_NEVER,
.force_dual_color_blend = key->force_dual_color_blend,
.coherent_fb_fetch = key->coherent_fb_fetch,
.color_outputs_valid = key->color_outputs_valid,

View file

@ -36,7 +36,7 @@ blorp_compile_fs_brw(struct blorp_context *blorp, void *mem_ctx,
struct brw_wm_prog_key wm_key;
memset(&wm_key, 0, sizeof(wm_key));
wm_key.multisample_fbo = multisample_fbo ? BRW_ALWAYS : BRW_NEVER;
wm_key.multisample_fbo = multisample_fbo ? INTEL_ALWAYS : INTEL_NEVER;
wm_key.nr_color_regions = 1;
struct brw_compile_fs_params params = {

View file

@ -90,7 +90,7 @@ brw_do_emit_fb_writes(fs_visitor &s, int nr_color_regions, bool replicate_alpha)
* it if null_rt is enabled.
*/
const bool use_null_rt =
key->alpha_to_coverage == BRW_NEVER &&
key->alpha_to_coverage == INTEL_NEVER &&
!prog_data->uses_omask;
/* Even if there's no color buffers enabled, we still need to send
@ -196,7 +196,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
brw_reg int_sample_offset_x, int_sample_offset_y; /* Used on Gen12HP+ */
brw_reg int_sample_offset_xy; /* Used on Gen8+ */
brw_reg half_int_sample_offset_x, half_int_sample_offset_y;
if (wm_prog_data->coarse_pixel_dispatch != BRW_ALWAYS) {
if (wm_prog_data->coarse_pixel_dispatch != INTEL_ALWAYS) {
/* The thread payload only delivers subspan locations (ss0, ss1,
* ss2, ...). Since subspans covers 2x2 pixels blocks, we need to
* generate 4 pixel coordinates out of each subspan location. We do this
@ -245,7 +245,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
brw_reg int_coarse_offset_x, int_coarse_offset_y; /* Used on Gen12HP+ */
brw_reg int_coarse_offset_xy; /* Used on Gen8+ */
brw_reg half_int_coarse_offset_x, half_int_coarse_offset_y;
if (wm_prog_data->coarse_pixel_dispatch != BRW_NEVER) {
if (wm_prog_data->coarse_pixel_dispatch != INTEL_NEVER) {
/* In coarse pixel dispatch we have to do the same ADD instruction that
* we do in normal per pixel dispatch, except this time we're not adding
* 1 in each direction, but instead the coarse pixel size.
@ -295,7 +295,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
brw_reg int_pixel_offset_xy; /* Used on Gen8+ */
brw_reg half_int_pixel_offset_x, half_int_pixel_offset_y;
switch (wm_prog_data->coarse_pixel_dispatch) {
case BRW_NEVER:
case INTEL_NEVER:
int_pixel_offset_x = int_sample_offset_x;
int_pixel_offset_y = int_sample_offset_y;
int_pixel_offset_xy = int_sample_offset_xy;
@ -303,7 +303,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
half_int_pixel_offset_y = half_int_sample_offset_y;
break;
case BRW_SOMETIMES: {
case INTEL_SOMETIMES: {
const fs_builder dbld =
abld.exec_all().group(MIN2(16, s.dispatch_width) * 2, 0);
@ -342,7 +342,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
break;
}
case BRW_ALWAYS:
case INTEL_ALWAYS:
int_pixel_offset_x = int_coarse_offset_x;
int_pixel_offset_y = int_coarse_offset_y;
int_pixel_offset_xy = int_coarse_offset_xy;
@ -376,12 +376,12 @@ brw_emit_interpolation_setup(fs_visitor &s)
brw_reg(stride(suboffset(gi_uw, 5), 2, 8, 0)),
int_pixel_offset_y);
if (wm_prog_data->coarse_pixel_dispatch != BRW_NEVER) {
if (wm_prog_data->coarse_pixel_dispatch != INTEL_NEVER) {
fs_inst *addx = dbld.ADD(int_pixel_x, int_pixel_x,
horiz_stride(half_int_pixel_offset_x, 0));
fs_inst *addy = dbld.ADD(int_pixel_y, int_pixel_y,
horiz_stride(half_int_pixel_offset_y, 0));
if (wm_prog_data->coarse_pixel_dispatch != BRW_ALWAYS) {
if (wm_prog_data->coarse_pixel_dispatch != INTEL_ALWAYS) {
addx->predicate = BRW_PREDICATE_NORMAL;
addy->predicate = BRW_PREDICATE_NORMAL;
}
@ -418,7 +418,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
abld = bld.annotate("compute pos.z");
brw_reg coarse_z;
if (wm_prog_data->coarse_pixel_dispatch != BRW_NEVER &&
if (wm_prog_data->coarse_pixel_dispatch != INTEL_NEVER &&
wm_prog_data->uses_depth_w_coefficients) {
/* In coarse pixel mode, the HW doesn't interpolate Z coordinate
* properly. In the same way we have to add the coarse pixel size to
@ -469,10 +469,10 @@ brw_emit_interpolation_setup(fs_visitor &s)
brw_reg sample_z = s.pixel_z;
switch (wm_prog_data->coarse_pixel_dispatch) {
case BRW_NEVER:
case INTEL_NEVER:
break;
case BRW_SOMETIMES:
case INTEL_SOMETIMES:
assert(wm_prog_data->uses_src_depth);
assert(wm_prog_data->uses_depth_w_coefficients);
s.pixel_z = abld.vgrf(BRW_TYPE_F);
@ -482,7 +482,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
abld.SEL(s.pixel_z, coarse_z, sample_z));
break;
case BRW_ALWAYS:
case INTEL_ALWAYS:
assert(!wm_prog_data->uses_src_depth);
assert(wm_prog_data->uses_depth_w_coefficients);
s.pixel_z = coarse_z;
@ -497,7 +497,7 @@ brw_emit_interpolation_setup(fs_visitor &s)
abld.emit(SHADER_OPCODE_RCP, s.wpos_w, s.pixel_w);
}
if (wm_key->persample_interp == BRW_SOMETIMES) {
if (wm_key->persample_interp == INTEL_SOMETIMES) {
assert(!devinfo->needs_unlit_centroid_workaround);
const fs_builder ubld = bld.exec_all().group(16, 0);
@ -678,7 +678,7 @@ calculate_urb_setup(const struct intel_device_info *devinfo,
nir->info.inputs_read & ~nir->info.per_primitive_inputs;
/* Figure out where each of the incoming setup attributes lands. */
if (key->mesh_input != BRW_NEVER) {
if (key->mesh_input != INTEL_NEVER) {
/* Per-Primitive Attributes are laid out by Hardware before the regular
* attributes, so order them like this to make easy later to map setup
* into real HW registers.
@ -1082,12 +1082,12 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
shader->info.fs.uses_sample_shading ||
shader->info.outputs_read;
assert(key->multisample_fbo != BRW_NEVER ||
key->persample_interp == BRW_NEVER);
assert(key->multisample_fbo != INTEL_NEVER ||
key->persample_interp == INTEL_NEVER);
prog_data->persample_dispatch = key->persample_interp;
if (prog_data->sample_shading)
prog_data->persample_dispatch = BRW_ALWAYS;
prog_data->persample_dispatch = INTEL_ALWAYS;
/* We can only persample dispatch if we have a multisample FBO */
prog_data->persample_dispatch = MIN2(prog_data->persample_dispatch,
@ -1112,7 +1112,7 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
* persample dispatch, we hard-code it to 0.5.
*/
prog_data->uses_pos_offset =
prog_data->persample_dispatch != BRW_NEVER &&
prog_data->persample_dispatch != INTEL_NEVER &&
(BITSET_TEST(shader->info.system_values_read,
SYSTEM_VALUE_SAMPLE_POS) ||
BITSET_TEST(shader->info.system_values_read,
@ -1133,7 +1133,7 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
* So cleanup any potentially set sample barycentric mode when not in per
* sample dispatch.
*/
if (prog_data->persample_dispatch == BRW_NEVER) {
if (prog_data->persample_dispatch == INTEL_NEVER) {
prog_data->barycentric_interp_modes &=
~BITFIELD_BIT(BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE);
}
@ -1158,21 +1158,21 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
/* The current VK_EXT_graphics_pipeline_library specification requires
* coarse to specified at compile time. But per sample interpolation can be
* dynamic. So we should never be in a situation where coarse &
* persample_interp are both respectively true & BRW_ALWAYS.
* persample_interp are both respectively true & INTEL_ALWAYS.
*
* Coarse will dynamically turned off when persample_interp is active.
*/
assert(!key->coarse_pixel || key->persample_interp != BRW_ALWAYS);
assert(!key->coarse_pixel || key->persample_interp != INTEL_ALWAYS);
prog_data->coarse_pixel_dispatch =
brw_sometimes_invert(prog_data->persample_dispatch);
intel_sometimes_invert(prog_data->persample_dispatch);
if (!key->coarse_pixel ||
prog_data->uses_omask ||
prog_data->sample_shading ||
prog_data->uses_sample_mask ||
(prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) ||
prog_data->computed_stencil) {
prog_data->coarse_pixel_dispatch = BRW_NEVER;
prog_data->coarse_pixel_dispatch = INTEL_NEVER;
}
/* ICL PRMs, Volume 9: Render Engine, Shared Functions Pixel Interpolater,
@ -1202,7 +1202,7 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
* interpolater message at sample.
*/
if (intel_nir_pulls_at_sample(shader))
prog_data->coarse_pixel_dispatch = BRW_NEVER;
prog_data->coarse_pixel_dispatch = INTEL_NEVER;
/* We choose to always enable VMask prior to XeHP, as it would cause
* us to lose out on the eliminate_find_live_channel() optimization.
@ -1210,16 +1210,16 @@ brw_nir_populate_wm_prog_data(nir_shader *shader,
prog_data->uses_vmask = devinfo->verx10 < 125 ||
shader->info.fs.needs_quad_helper_invocations ||
shader->info.uses_wide_subgroup_intrinsics ||
prog_data->coarse_pixel_dispatch != BRW_NEVER;
prog_data->coarse_pixel_dispatch != INTEL_NEVER;
prog_data->uses_src_w =
BITSET_TEST(shader->info.system_values_read, SYSTEM_VALUE_FRAG_COORD);
prog_data->uses_src_depth =
BITSET_TEST(shader->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) &&
prog_data->coarse_pixel_dispatch != BRW_ALWAYS;
prog_data->coarse_pixel_dispatch != INTEL_ALWAYS;
prog_data->uses_depth_w_coefficients = prog_data->uses_pc_bary_coefficients ||
(BITSET_TEST(shader->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) &&
prog_data->coarse_pixel_dispatch != BRW_NEVER);
prog_data->coarse_pixel_dispatch != INTEL_NEVER);
calculate_urb_setup(devinfo, key, prog_data, shader, mue_map);
brw_compute_flat_inputs(prog_data, shader);
@ -1541,7 +1541,7 @@ brw_compile_fs(const struct brw_compiler *compiler,
* "If Pixel Shader outputs oMask, AlphaToCoverage is disabled in
* hardware, regardless of the state setting for this feature."
*/
if (key->alpha_to_coverage != BRW_NEVER) {
if (key->alpha_to_coverage != INTEL_NEVER) {
/* Run constant fold optimization in order to get the correct source
* offset to determine render target 0 store instruction in
* emit_alpha_to_coverage pass.

View file

@ -294,18 +294,6 @@ struct brw_mesh_prog_key
unsigned padding:31;
};
enum brw_sometimes {
BRW_NEVER = 0,
BRW_SOMETIMES,
BRW_ALWAYS
};
static inline enum brw_sometimes
brw_sometimes_invert(enum brw_sometimes x)
{
return (enum brw_sometimes)((int)BRW_ALWAYS - (int)x);
}
/** The program key for Fragment/Pixel Shaders. */
struct brw_wm_prog_key {
struct brw_base_prog_key base;
@ -317,7 +305,7 @@ struct brw_wm_prog_key {
bool flat_shade:1;
unsigned nr_color_regions:5;
bool alpha_test_replicate_alpha:1;
enum brw_sometimes alpha_to_coverage:2;
enum intel_sometimes alpha_to_coverage:2;
bool clamp_fragment_color:1;
bool force_dual_color_blend:1;
@ -330,13 +318,13 @@ struct brw_wm_prog_key {
* us to run per-sample. Even when running per-sample due to gl_SampleID,
* we may still interpolate unqualified inputs at the pixel center.
*/
enum brw_sometimes persample_interp:2;
enum intel_sometimes persample_interp:2;
/* Whether or not we are running on a multisampled framebuffer */
enum brw_sometimes multisample_fbo:2;
enum intel_sometimes multisample_fbo:2;
/* Whether the preceding shader stage is mesh */
enum brw_sometimes mesh_input:2;
enum intel_sometimes mesh_input:2;
bool coherent_fb_fetch:1;
bool ignore_sample_mask_out:1;
@ -691,18 +679,18 @@ struct brw_wm_prog_data {
float min_sample_shading;
/** Should this shader be dispatched per-sample */
enum brw_sometimes persample_dispatch;
enum intel_sometimes persample_dispatch;
/**
* Shader is ran at the coarse pixel shading dispatch rate (3DSTATE_CPS).
*/
enum brw_sometimes coarse_pixel_dispatch;
enum intel_sometimes coarse_pixel_dispatch;
/**
* Shader writes the SampleMask and this is AND-ed with the API's
* SampleMask to generate a new coverage mask.
*/
enum brw_sometimes alpha_to_coverage;
enum intel_sometimes alpha_to_coverage;
unsigned msaa_flags_param;
@ -859,7 +847,7 @@ static inline bool
brw_wm_prog_data_is_persample(const struct brw_wm_prog_data *prog_data,
enum intel_msaa_flags pushed_msaa_flags)
{
if (prog_data->persample_dispatch != BRW_SOMETIMES)
if (prog_data->persample_dispatch != INTEL_SOMETIMES)
return prog_data->persample_dispatch;
assert(pushed_msaa_flags & INTEL_MSAA_FLAG_ENABLE_DYNAMIC);
@ -871,9 +859,9 @@ brw_wm_prog_data_is_persample(const struct brw_wm_prog_data *prog_data,
assert(pushed_msaa_flags & INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH);
if (pushed_msaa_flags & INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH)
assert(prog_data->persample_dispatch != BRW_NEVER);
assert(prog_data->persample_dispatch != INTEL_NEVER);
else
assert(prog_data->persample_dispatch != BRW_ALWAYS);
assert(prog_data->persample_dispatch != INTEL_ALWAYS);
return (pushed_msaa_flags & INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH) != 0;
}
@ -887,7 +875,7 @@ wm_prog_data_barycentric_modes(const struct brw_wm_prog_data *prog_data,
/* In the non dynamic case, we can just return the computed modes from
* compilation time.
*/
if (prog_data->persample_dispatch != BRW_SOMETIMES)
if (prog_data->persample_dispatch != INTEL_SOMETIMES)
return modes;
assert(pushed_msaa_flags & INTEL_MSAA_FLAG_ENABLE_DYNAMIC);
@ -975,15 +963,15 @@ static inline bool
brw_wm_prog_data_is_coarse(const struct brw_wm_prog_data *prog_data,
enum intel_msaa_flags pushed_msaa_flags)
{
if (prog_data->coarse_pixel_dispatch != BRW_SOMETIMES)
if (prog_data->coarse_pixel_dispatch != INTEL_SOMETIMES)
return prog_data->coarse_pixel_dispatch;
assert(pushed_msaa_flags & INTEL_MSAA_FLAG_ENABLE_DYNAMIC);
if (pushed_msaa_flags & INTEL_MSAA_FLAG_COARSE_RT_WRITES)
assert(prog_data->coarse_pixel_dispatch != BRW_NEVER);
assert(prog_data->coarse_pixel_dispatch != INTEL_NEVER);
else
assert(prog_data->coarse_pixel_dispatch != BRW_ALWAYS);
assert(prog_data->coarse_pixel_dispatch != INTEL_ALWAYS);
return (pushed_msaa_flags & INTEL_MSAA_FLAG_COARSE_RT_WRITES) != 0;
}

View file

@ -863,7 +863,7 @@ brw_barycentric_mode(const struct brw_wm_prog_key *key,
* interpolation. We'll dynamically remap things so that the FS thread
* payload is not affected.
*/
bary = key->persample_interp == BRW_SOMETIMES ?
bary = key->persample_interp == INTEL_SOMETIMES ?
BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE :
BRW_BARYCENTRIC_PERSPECTIVE_PIXEL;
break;
@ -1848,4 +1848,3 @@ namespace brw {
inst->conditional_mod = BRW_CONDITIONAL_NZ;
}
}

View file

@ -3580,8 +3580,8 @@ emit_non_coherent_fb_read(nir_to_brw_state &ntb, const fs_builder &bld, const br
* shouldn't be necessary to recompile based on whether the framebuffer is
* CMS or UMS.
*/
assert(wm_key->multisample_fbo == BRW_ALWAYS ||
wm_key->multisample_fbo == BRW_NEVER);
assert(wm_key->multisample_fbo == INTEL_ALWAYS ||
wm_key->multisample_fbo == INTEL_NEVER);
if (wm_key->multisample_fbo &&
ntb.system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
ntb.system_values[SYSTEM_VALUE_SAMPLE_ID] = emit_sampleid_setup(ntb);
@ -3807,7 +3807,7 @@ emit_samplepos_setup(nir_to_brw_state &ntb)
const fs_builder abld = bld.annotate("compute sample position");
brw_reg pos = abld.vgrf(BRW_TYPE_F, 2);
if (wm_prog_data->persample_dispatch == BRW_NEVER) {
if (wm_prog_data->persample_dispatch == INTEL_NEVER) {
/* From ARB_sample_shading specification:
* "When rendering to a non-multisample buffer, or if multisample
* rasterization is disabled, gl_SamplePosition will always be
@ -3842,7 +3842,7 @@ emit_samplepos_setup(nir_to_brw_state &ntb)
abld.MUL(offset(pos, abld, i), tmp_f, brw_imm_f(1 / 16.0f));
}
if (wm_prog_data->persample_dispatch == BRW_SOMETIMES) {
if (wm_prog_data->persample_dispatch == INTEL_SOMETIMES) {
check_dynamic_msaa_flag(abld, wm_prog_data,
INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH);
for (unsigned i = 0; i < 2; i++) {
@ -3869,7 +3869,7 @@ emit_sampleid_setup(nir_to_brw_state &ntb)
const fs_builder abld = bld.annotate("compute sample id");
brw_reg sample_id = abld.vgrf(BRW_TYPE_UD);
assert(key->multisample_fbo != BRW_NEVER);
assert(key->multisample_fbo != INTEL_NEVER);
/* Sample ID comes in as 4-bit numbers in g1.0:
*
@ -3916,7 +3916,7 @@ emit_sampleid_setup(nir_to_brw_state &ntb)
abld.AND(sample_id, tmp, brw_imm_w(0xf));
if (key->multisample_fbo == BRW_SOMETIMES) {
if (key->multisample_fbo == INTEL_SOMETIMES) {
check_dynamic_msaa_flag(abld, wm_prog_data,
INTEL_MSAA_FLAG_MULTISAMPLE_FBO);
set_predicate(BRW_PREDICATE_NORMAL,
@ -3936,12 +3936,12 @@ emit_samplemaskin_setup(nir_to_brw_state &ntb)
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
/* The HW doesn't provide us with expected values. */
assert(wm_prog_data->coarse_pixel_dispatch != BRW_ALWAYS);
assert(wm_prog_data->coarse_pixel_dispatch != INTEL_ALWAYS);
brw_reg coverage_mask =
fetch_payload_reg(bld, s.fs_payload().sample_mask_in_reg, BRW_TYPE_UD);
if (wm_prog_data->persample_dispatch == BRW_NEVER)
if (wm_prog_data->persample_dispatch == INTEL_NEVER)
return coverage_mask;
/* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
@ -3963,7 +3963,7 @@ emit_samplemaskin_setup(nir_to_brw_state &ntb)
brw_reg enabled_mask = abld.SHL(one, ntb.system_values[SYSTEM_VALUE_SAMPLE_ID]);
brw_reg mask = abld.AND(enabled_mask, coverage_mask);
if (wm_prog_data->persample_dispatch == BRW_ALWAYS)
if (wm_prog_data->persample_dispatch == INTEL_ALWAYS)
return mask;
check_dynamic_msaa_flag(abld, wm_prog_data,
@ -3987,7 +3987,7 @@ emit_shading_rate_setup(nir_to_brw_state &ntb)
/* Coarse pixel shading size fields overlap with other fields of not in
* coarse pixel dispatch mode, so report 0 when that's not the case.
*/
if (wm_prog_data->coarse_pixel_dispatch == BRW_NEVER)
if (wm_prog_data->coarse_pixel_dispatch == INTEL_NEVER)
return brw_imm_ud(0);
const fs_builder abld = bld.annotate("compute fragment shading rate");
@ -4008,7 +4008,7 @@ emit_shading_rate_setup(nir_to_brw_state &ntb)
brw_reg rate = abld.OR(abld.SHL(int_rate_x, brw_imm_ud(2)), int_rate_y);
if (wm_prog_data->coarse_pixel_dispatch == BRW_ALWAYS)
if (wm_prog_data->coarse_pixel_dispatch == INTEL_ALWAYS)
return rate;
check_dynamic_msaa_flag(abld, wm_prog_data,
@ -4379,7 +4379,7 @@ fs_nir_emit_fs_intrinsic(nir_to_brw_state &ntb,
brw_reg flag_reg;
struct brw_wm_prog_key *wm_prog_key = (struct brw_wm_prog_key *) s.key;
if (wm_prog_key->multisample_fbo == BRW_SOMETIMES) {
if (wm_prog_key->multisample_fbo == INTEL_SOMETIMES) {
struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(s.prog_data);
check_dynamic_msaa_flag(bld.exec_all().group(8, 0),

View file

@ -470,9 +470,9 @@ lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
0 /* coarse_rt_write */);
brw_reg desc = brw_imm_ud(0);
if (prog_data->coarse_pixel_dispatch == BRW_ALWAYS) {
if (prog_data->coarse_pixel_dispatch == INTEL_ALWAYS) {
inst->desc |= (1 << 18);
} else if (prog_data->coarse_pixel_dispatch == BRW_SOMETIMES) {
} else if (prog_data->coarse_pixel_dispatch == INTEL_SOMETIMES) {
STATIC_ASSERT(INTEL_MSAA_FLAG_COARSE_RT_WRITES == (1 << 18));
const fs_builder &ubld = bld.exec_all().group(8, 0);
desc = ubld.vgrf(BRW_TYPE_UD);
@ -2107,9 +2107,9 @@ lower_interpolator_logical_send(const fs_builder &bld, fs_inst *inst,
false /* coarse_pixel_rate */,
inst->exec_size, inst->group);
if (wm_prog_data->coarse_pixel_dispatch == BRW_ALWAYS) {
if (wm_prog_data->coarse_pixel_dispatch == INTEL_ALWAYS) {
desc_imm |= (1 << 15);
} else if (wm_prog_data->coarse_pixel_dispatch == BRW_SOMETIMES) {
} else if (wm_prog_data->coarse_pixel_dispatch == INTEL_SOMETIMES) {
STATIC_ASSERT(INTEL_MSAA_FLAG_COARSE_PI_MSG == (1 << 15));
brw_reg orig_desc = desc;
const fs_builder &ubld = bld.exec_all().group(8, 0);

View file

@ -612,9 +612,9 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
if (devinfo->ver >= 11)
nir_lower_interpolation(nir, ~0);
if (key->multisample_fbo == BRW_NEVER) {
if (key->multisample_fbo == INTEL_NEVER) {
nir_lower_single_sampled(nir);
} else if (key->persample_interp == BRW_ALWAYS) {
} else if (key->persample_interp == INTEL_ALWAYS) {
nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
nir_metadata_control_flow,
NULL);

View file

@ -82,7 +82,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
const struct brw_wm_prog_data *prog_data)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
assert(key->alpha_to_coverage != BRW_NEVER);
assert(key->alpha_to_coverage != INTEL_NEVER);
nir_function_impl *impl = nir_shader_get_entrypoint(shader);
@ -172,7 +172,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
nir_def *dither_mask = build_dither_mask(&b, color0);
dither_mask = nir_iand(&b, sample_mask, dither_mask);
if (key->alpha_to_coverage == BRW_SOMETIMES) {
if (key->alpha_to_coverage == INTEL_SOMETIMES) {
nir_def *push_flags =
nir_load_uniform(&b, 1, 32, nir_imm_int(&b, prog_data->msaa_flags_param * 4));
nir_def *alpha_to_coverage =

View file

@ -15,6 +15,19 @@
extern "C" {
#endif
/** A tri-state value to track states that are potentially dynamic */
enum intel_sometimes {
INTEL_NEVER = 0,
INTEL_SOMETIMES,
INTEL_ALWAYS
};
static inline enum intel_sometimes
intel_sometimes_invert(enum intel_sometimes x)
{
return (enum intel_sometimes)((int)INTEL_ALWAYS - (int)x);
}
enum intel_msaa_flags {
/** Must be set whenever any dynamic MSAA is used
*

View file

@ -541,7 +541,7 @@ populate_wm_prog_key(struct anv_pipeline_stage *stage,
const struct vk_multisample_state *ms,
const struct vk_fragment_shading_rate_state *fsr,
const struct vk_render_pass_state *rp,
const enum brw_sometimes is_mesh)
const enum intel_sometimes is_mesh)
{
const struct anv_device *device = pipeline->base.device;
@ -587,18 +587,18 @@ populate_wm_prog_key(struct anv_pipeline_stage *stage,
*/
key->multisample_fbo =
BITSET_TEST(dynamic, MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES) ?
BRW_SOMETIMES :
ms->rasterization_samples > 1 ? BRW_ALWAYS : BRW_NEVER;
INTEL_SOMETIMES :
ms->rasterization_samples > 1 ? INTEL_ALWAYS : INTEL_NEVER;
key->persample_interp =
BITSET_TEST(dynamic, MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES) ?
BRW_SOMETIMES :
INTEL_SOMETIMES :
(ms->sample_shading_enable &&
(ms->min_sample_shading * ms->rasterization_samples) > 1) ?
BRW_ALWAYS : BRW_NEVER;
INTEL_ALWAYS : INTEL_NEVER;
key->alpha_to_coverage =
BITSET_TEST(dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_COVERAGE_ENABLE) ?
BRW_SOMETIMES :
(ms->alpha_to_coverage_enable ? BRW_ALWAYS : BRW_NEVER);
INTEL_SOMETIMES :
(ms->alpha_to_coverage_enable ? INTEL_ALWAYS : INTEL_NEVER);
/* TODO: We should make this dynamic */
if (device->physical->instance->sample_mask_out_opengl_behaviour)
@ -608,9 +608,9 @@ populate_wm_prog_key(struct anv_pipeline_stage *stage,
key->color_outputs_valid = (1u << MAX_RTS) - 1;
key->nr_color_regions = MAX_RTS;
key->alpha_to_coverage = BRW_SOMETIMES;
key->multisample_fbo = BRW_SOMETIMES;
key->persample_interp = BRW_SOMETIMES;
key->alpha_to_coverage = INTEL_SOMETIMES;
key->multisample_fbo = INTEL_SOMETIMES;
key->persample_interp = INTEL_SOMETIMES;
}
key->mesh_input = is_mesh;
@ -672,9 +672,9 @@ anv_graphics_pipeline_stage_fragment_dynamic(const struct anv_pipeline_stage *st
if (stage->stage != MESA_SHADER_FRAGMENT)
return false;
return stage->key.wm.persample_interp == BRW_SOMETIMES ||
stage->key.wm.multisample_fbo == BRW_SOMETIMES ||
stage->key.wm.alpha_to_coverage == BRW_SOMETIMES;
return stage->key.wm.persample_interp == INTEL_SOMETIMES ||
stage->key.wm.multisample_fbo == INTEL_SOMETIMES ||
stage->key.wm.alpha_to_coverage == INTEL_SOMETIMES;
}
static void
@ -1557,8 +1557,8 @@ anv_pipeline_link_fs(const struct brw_compiler *compiler,
num_rt_bindings = stage->key.wm.nr_color_regions;
} else if (brw_nir_fs_needs_null_rt(
compiler->devinfo, stage->nir,
stage->key.wm.multisample_fbo != BRW_NEVER,
stage->key.wm.alpha_to_coverage != BRW_NEVER)) {
stage->key.wm.multisample_fbo != INTEL_NEVER,
stage->key.wm.alpha_to_coverage != INTEL_NEVER)) {
/* Setup a null render target */
rt_bindings[0] = (struct anv_pipeline_binding) {
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
@ -1854,15 +1854,15 @@ anv_graphics_pipeline_init_keys(struct anv_graphics_base_pipeline *pipeline,
state->rs == NULL ||
!state->rs->rasterizer_discard_enable ||
BITSET_TEST(state->dynamic, MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE);
enum brw_sometimes is_mesh = BRW_NEVER;
enum intel_sometimes is_mesh = INTEL_NEVER;
if (device->vk.enabled_extensions.EXT_mesh_shader) {
if (anv_pipeline_base_has_stage(pipeline, MESA_SHADER_VERTEX))
is_mesh = BRW_NEVER;
is_mesh = INTEL_NEVER;
else if (anv_pipeline_base_has_stage(pipeline, MESA_SHADER_MESH))
is_mesh = BRW_ALWAYS;
is_mesh = INTEL_ALWAYS;
else {
assert(pipeline->base.type == ANV_PIPELINE_GRAPHICS_LIB);
is_mesh = BRW_SOMETIMES;
is_mesh = INTEL_SOMETIMES;
}
}
populate_wm_prog_key(&stages[s],

View file

@ -686,9 +686,9 @@ update_fs_msaa_flags(struct anv_gfx_dynamic_state *hw_state,
/* If we have any dynamic bits here, we might need to update the value
* in the push constant for the shader.
*/
if (wm_prog_data->coarse_pixel_dispatch != BRW_SOMETIMES &&
wm_prog_data->persample_dispatch != BRW_SOMETIMES &&
wm_prog_data->alpha_to_coverage != BRW_SOMETIMES)
if (wm_prog_data->coarse_pixel_dispatch != INTEL_SOMETIMES &&
wm_prog_data->persample_dispatch != INTEL_SOMETIMES &&
wm_prog_data->alpha_to_coverage != INTEL_SOMETIMES)
return;
enum intel_msaa_flags fs_msaa_flags = INTEL_MSAA_FLAG_ENABLE_DYNAMIC;
@ -697,7 +697,7 @@ update_fs_msaa_flags(struct anv_gfx_dynamic_state *hw_state,
fs_msaa_flags |= INTEL_MSAA_FLAG_MULTISAMPLE_FBO;
if (wm_prog_data->sample_shading) {
assert(wm_prog_data->persample_dispatch != BRW_NEVER);
assert(wm_prog_data->persample_dispatch != INTEL_NEVER);
fs_msaa_flags |= INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH;
}
if ((pipeline->sample_shading_enable &&
@ -708,7 +708,7 @@ update_fs_msaa_flags(struct anv_gfx_dynamic_state *hw_state,
}
}
if (wm_prog_data->coarse_pixel_dispatch == BRW_SOMETIMES &&
if (wm_prog_data->coarse_pixel_dispatch == INTEL_SOMETIMES &&
!(fs_msaa_flags & INTEL_MSAA_FLAG_PERSAMPLE_DISPATCH)) {
fs_msaa_flags |= INTEL_MSAA_FLAG_COARSE_PI_MSG |
INTEL_MSAA_FLAG_COARSE_RT_WRITES;