mesa/src/intel/compiler/brw_nir.h

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#pragma once
#include "brw_reg.h"
#include "compiler/nir/nir.h"
#include "brw_compiler.h"
#include "nir_builder.h"
#ifdef __cplusplus
extern "C" {
#endif
extern const struct nir_shader_compiler_options brw_scalar_nir_options;
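
/* Type-size callbacks (in vec4 slots, dvec4 slots, or bytes) of the kind
 * passed to nir_lower_io() and friends.
 */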
int type_size_vec4(const struct glsl_type *type, bool bindless);
int type_size_dvec4(const struct glsl_type *type, bool bindless);
static inline int
type_size_scalar_bytes(const struct glsl_type *type, bool bindless)
{
   return glsl_count_dword_slots(type, bindless) * 4;
}

static inline int
type_size_vec4_bytes(const struct glsl_type *type, bool bindless)
{
   return type_size_vec4(type, bindless) * 16;
}

struct brw_nir_compiler_opts {
   /* Soft floating point implementation shader */
   const nir_shader *softfp64;

   /* Whether robust image access is enabled */
   bool robust_image_access;

   /* Input vertices for TCS stage (0 means dynamic) */
   unsigned input_vertices;
};

/* UBO surface index can come in two flavors:
 * - nir_intrinsic_resource_intel
 * - anything else
 *
 * In the first case, checking that the surface index is constant requires
 * checking resource_intel::src[1]. In any other case it's a simple
 * nir_src_is_const().
 *
 * This function should only be called on src[0] of load_ubo intrinsics.
 */
static inline bool
brw_nir_ubo_surface_index_is_pushable(nir_src src)
{
   nir_intrinsic_instr *intrin =
      src.ssa->parent_instr->type == nir_instr_type_intrinsic ?
      nir_instr_as_intrinsic(src.ssa->parent_instr) : NULL;

   if (intrin && intrin->intrinsic == nir_intrinsic_resource_intel) {
      return (nir_intrinsic_resource_access_intel(intrin) &
              nir_resource_intel_pushable);
   }

   return nir_src_is_const(src);
}

static inline unsigned
brw_nir_ubo_surface_index_get_push_block(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   if (!brw_nir_ubo_surface_index_is_pushable(src))
      return UINT32_MAX;

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   assert(intrin->intrinsic == nir_intrinsic_resource_intel);

   return nir_intrinsic_resource_block_intel(intrin);
}
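
/* Rough usage sketch: a lowering pass that inspects a load_ubo intrinsic
 * might query the two helpers above like so (`load` is a hypothetical
 * nir_intrinsic_instr pointer for the load_ubo being visited):
 *
 *    if (brw_nir_ubo_surface_index_is_pushable(load->src[0])) {
 *       unsigned block =
 *          brw_nir_ubo_surface_index_get_push_block(load->src[0]);
 *       // record `block` as a candidate for push-constant promotion
 *    }
 */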
/* This helper returns the binding table index of a surface access (any
 * buffer/image/etc...). It works off the source of one of the intrinsics
 * (load_ubo, load_ssbo, store_ssbo, load_image, store_image, etc...).
 *
 * If the source is constant, then this is the binding table index. If we're
 * going through a resource_intel intrinsic, then we need to check src[1] of
 * that intrinsic.
 */
static inline unsigned
brw_nir_ubo_surface_index_get_bti(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   if (!intrin || intrin->intrinsic != nir_intrinsic_resource_intel)
      return UINT32_MAX;

   /* In practice we could even drop this intrinsic because bindless accesses
    * always operate from a base offset coming from a push constant, so they
    * can never be constant.
    */
   if (nir_intrinsic_resource_access_intel(intrin) &
       nir_resource_intel_bindless)
      return UINT32_MAX;

   if (!nir_src_is_const(intrin->src[1]))
      return UINT32_MAX;

   return nir_src_as_uint(intrin->src[1]);
}
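
/* Rough usage sketch: given the intrinsic source that holds the surface
 * index (names hypothetical), a backend can ask whether the binding table
 * index is known at compile time:
 *
 *    unsigned bti = brw_nir_ubo_surface_index_get_bti(intrin->src[0]);
 *    if (bti != UINT32_MAX) {
 *       // surface index is a compile-time constant
 *    }
 */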
/* Returns true if a fragment shader needs at least one render target */
static inline bool
brw_nir_fs_needs_null_rt(const struct intel_device_info *devinfo,
                         nir_shader *nir,
                         bool multisample_fbo, bool alpha_to_coverage)
{
   assert(nir->info.stage == MESA_SHADER_FRAGMENT);

   /* Null-RT bit in the render target write extended descriptor is only
    * available on Gfx11+.
    */
   if (devinfo->ver < 11)
      return true;

   uint64_t relevant_outputs = 0;
   if (multisample_fbo)
      relevant_outputs |= BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);

   return (alpha_to_coverage ||
           (nir->info.outputs_written & relevant_outputs) != 0);
}

void brw_preprocess_nir(const struct brw_compiler *compiler,
                        nir_shader *nir,
                        const struct brw_nir_compiler_opts *opts);
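
/* Rough usage sketch: a driver front-end would typically fill
 * brw_nir_compiler_opts and run the common preprocessing once per shader
 * (`softfp64_shader` is a hypothetical name; the exact call site depends on
 * the driver):
 *
 *    struct brw_nir_compiler_opts opts = {
 *       .softfp64 = softfp64_shader,   // may be NULL
 *       .robust_image_access = true,
 *    };
 *    brw_preprocess_nir(compiler, nir, &opts);
 */
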
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer);

bool brw_nir_lower_cs_intrinsics(nir_shader *nir,
                                 const struct intel_device_info *devinfo,
                                 struct brw_cs_prog_data *prog_data);

bool brw_nir_lower_alpha_to_coverage(nir_shader *shader,
                                     const struct brw_wm_prog_key *key,
                                     const struct brw_wm_prog_data *prog_data);

void brw_nir_lower_vs_inputs(nir_shader *nir);

void brw_nir_lower_vue_inputs(nir_shader *nir,
                              const struct intel_vue_map *vue_map);

void brw_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue);

void brw_nir_lower_fs_inputs(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             const struct brw_wm_prog_key *key);

void brw_nir_lower_vue_outputs(nir_shader *nir);

void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue,
                               enum tess_primitive_mode tes_primitive_mode);

void brw_nir_lower_fs_outputs(nir_shader *nir);

bool brw_nir_lower_cmat(nir_shader *nir, unsigned subgroup_size);

struct brw_nir_lower_storage_image_opts {
   const struct intel_device_info *devinfo;

   bool lower_loads;
   bool lower_stores;
};

bool brw_nir_lower_storage_image(nir_shader *nir,
                                 const struct brw_nir_lower_storage_image_opts *opts);

bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader,
                                        const struct
                                        intel_device_info *devinfo);

void brw_postprocess_nir(nir_shader *nir,
                         const struct brw_compiler *compiler,
                         bool debug_enabled,
                         enum brw_robustness_flags robust_flags);

bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
                                         const uint8_t *attrib_wa_flags);

bool brw_nir_apply_trig_workarounds(nir_shader *nir);

bool brw_nir_limit_trig_input_range_workaround(nir_shader *nir);

bool brw_nir_lower_fsign(nir_shader *nir);
bool brw_nir_opt_fsat(nir_shader *);

void brw_nir_apply_key(nir_shader *nir,
                       const struct brw_compiler *compiler,
                       const struct brw_base_prog_key *key,
                       unsigned max_subgroup_size);

unsigned brw_nir_api_subgroup_size(const nir_shader *nir,
                                   unsigned hw_subgroup_size);

enum brw_conditional_mod brw_cmod_for_nir_comparison(nir_op op);

enum lsc_opcode lsc_op_for_nir_intrinsic(const nir_intrinsic_instr *intrin);

enum brw_reg_type brw_type_for_nir_type(const struct intel_device_info *devinfo,
                                        nir_alu_type type);

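/* Callback of the kind consumed by NIR's load/store vectorizer
 * (nir_opt_load_store_vectorize); it decides whether two memory accesses may
 * be combined given their alignment, bit size, component count and the hole
 * between them.
 */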
bool brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                                  unsigned bit_size,
                                  unsigned num_components,
                                  unsigned hole_size,
                                  nir_intrinsic_instr *low,
                                  nir_intrinsic_instr *high,
                                  void *data);

void brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
                                nir_shader *nir,
                                struct brw_ubo_range out_ranges[4]);

void brw_nir_optimize(nir_shader *nir,
                      const struct intel_device_info *devinfo);

nir_shader *brw_nir_create_passthrough_tcs(void *mem_ctx,
                                           const struct brw_compiler *compiler,
                                           const struct brw_tcs_prog_key *key);
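
/* Fragment shader output driver locations are packed as a (location, index)
 * pair using the macros below: bit 0 holds the output index and bits 31:1
 * hold the GLSL-assigned location.
 */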
#define BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define BRW_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)
#define BRW_NIR_FRAG_OUTPUT_LOCATION_SHIFT 1
#define BRW_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)

bool brw_nir_move_interpolation_to_top(nir_shader *nir);

nir_def *brw_nir_load_global_const(nir_builder *b,
                                   nir_intrinsic_instr *load_uniform,
                                   nir_def *base_addr,
                                   unsigned off);

const struct glsl_type *brw_nir_get_var_type(const struct nir_shader *nir,
                                             nir_variable *var);
void brw_nir_adjust_payload(nir_shader *shader);
static inline nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
gl_shader_stage stage)
{
   nir_variable_mode indirect_mask = (nir_variable_mode) 0;

   switch (stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_FRAGMENT:
      indirect_mask |= nir_var_shader_in;
      break;

   default:
      /* Everything else can handle indirect inputs */
      break;
   }

   if (stage != MESA_SHADER_TESS_CTRL &&
       stage != MESA_SHADER_TASK &&
       stage != MESA_SHADER_MESH)
      indirect_mask |= nir_var_shader_out;

   return indirect_mask;
}
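
/* Rough usage sketch: the mask computed above is typically handed to NIR's
 * indirect lowering, along the lines of
 *
 *    nir_lower_indirect_derefs(nir,
 *                              brw_nir_no_indirect_mask(compiler,
 *                                                       nir->info.stage),
 *                              UINT32_MAX);
 *
 * (exact call site and arguments depend on the compiler version).
 */
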
bool brw_nir_uses_inline_data(nir_shader *shader);
#ifdef __cplusplus
}
#endif