2014-08-15 10:32:07 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright © 2010 Intel Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*/
|
|
|
|
|
|
2016-01-18 12:16:48 +02:00
|
|
|
#include "compiler/glsl/ir.h"
|
2014-08-15 10:32:07 -07:00
|
|
|
#include "brw_fs.h"
|
2015-03-17 11:49:04 -07:00
|
|
|
#include "brw_nir.h"
|
intel/fs: Improve discard_if code generation
Previously we would blindly emit an sequence like:
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.z.f0.1(16) null<1>D g7<8,8,1>D 0D
The first move sets the flags based on the initial execution mask.
Later discard sequences contain a predicated compare that can only
remove more SIMD channels. Often times the only user of the result from
the first compare is the second compare. Instead, generate a sequence
like
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.ge.f0.1(8) null<1>F g5<8,8,1>F 0x41700000F /* 15F */
If the results stored in g7 and f0.0 are not used, the comparison will
be eliminated. This removes an instruction and potentially reduces
register pressure.
v2: Major re-write of the commit message (including fixing the assembly
code). Suggested by Matt.
All Gen8+ platforms had similar results. (Ice Lake shown)
total instructions in shared programs: 17224434 -> 17198659 (-0.15%)
instructions in affected programs: 2908125 -> 2882350 (-0.89%)
helped: 18891
HURT: 5
helped stats (abs) min: 1 max: 12 x̄: 1.38 x̃: 1
helped stats (rel) min: 0.03% max: 25.00% x̄: 1.76% x̃: 1.02%
HURT stats (abs) min: 9 max: 105 x̄: 51.40 x̃: 35
HURT stats (rel) min: 0.43% max: 4.92% x̄: 2.34% x̃: 1.56%
95% mean confidence interval for instructions value: -1.39 -1.34
95% mean confidence interval for instructions %-change: -1.79% -1.73%
Instructions are helped.
total cycles in shared programs: 361468458 -> 361170679 (-0.08%)
cycles in affected programs: 38470116 -> 38172337 (-0.77%)
helped: 16202
HURT: 1456
helped stats (abs) min: 1 max: 4473 x̄: 26.24 x̃: 18
helped stats (rel) min: <.01% max: 28.44% x̄: 2.90% x̃: 2.18%
HURT stats (abs) min: 1 max: 5982 x̄: 87.51 x̃: 28
HURT stats (rel) min: <.01% max: 51.29% x̄: 5.48% x̃: 1.64%
95% mean confidence interval for cycles value: -18.24 -15.49
95% mean confidence interval for cycles %-change: -2.26% -2.14%
Cycles are helped.
total spills in shared programs: 12147 -> 12176 (0.24%)
spills in affected programs: 175 -> 204 (16.57%)
helped: 8
HURT: 5
total fills in shared programs: 25262 -> 25292 (0.12%)
fills in affected programs: 269 -> 299 (11.15%)
helped: 8
HURT: 5
Haswell
total instructions in shared programs: 13530316 -> 13502647 (-0.20%)
instructions in affected programs: 2507824 -> 2480155 (-1.10%)
helped: 18859
HURT: 10
helped stats (abs) min: 1 max: 12 x̄: 1.48 x̃: 1
helped stats (rel) min: 0.03% max: 27.78% x̄: 2.38% x̃: 1.41%
HURT stats (abs) min: 5 max: 39 x̄: 25.70 x̃: 31
HURT stats (rel) min: 0.22% max: 1.66% x̄: 1.09% x̃: 1.31%
95% mean confidence interval for instructions value: -1.49 -1.44
95% mean confidence interval for instructions %-change: -2.42% -2.34%
Instructions are helped.
total cycles in shared programs: 377865412 -> 377639034 (-0.06%)
cycles in affected programs: 40169572 -> 39943194 (-0.56%)
helped: 15550
HURT: 1938
helped stats (abs) min: 1 max: 2482 x̄: 25.67 x̃: 18
helped stats (rel) min: <.01% max: 37.77% x̄: 3.00% x̃: 2.25%
HURT stats (abs) min: 1 max: 4862 x̄: 89.17 x̃: 35
HURT stats (rel) min: <.01% max: 67.67% x̄: 6.16% x̃: 2.75%
95% mean confidence interval for cycles value: -14.42 -11.47
95% mean confidence interval for cycles %-change: -2.05% -1.91%
Cycles are helped.
total spills in shared programs: 26769 -> 26814 (0.17%)
spills in affected programs: 826 -> 871 (5.45%)
helped: 9
HURT: 10
total fills in shared programs: 38383 -> 38425 (0.11%)
fills in affected programs: 834 -> 876 (5.04%)
helped: 9
HURT: 10
LOST: 5
GAINED: 10
Ivy Bridge
total instructions in shared programs: 12079250 -> 12044139 (-0.29%)
instructions in affected programs: 2409680 -> 2374569 (-1.46%)
helped: 16135
HURT: 0
helped stats (abs) min: 1 max: 23 x̄: 2.18 x̃: 2
helped stats (rel) min: 0.07% max: 37.50% x̄: 2.72% x̃: 1.68%
95% mean confidence interval for instructions value: -2.21 -2.14
95% mean confidence interval for instructions %-change: -2.76% -2.67%
Instructions are helped.
total cycles in shared programs: 180116747 -> 179900405 (-0.12%)
cycles in affected programs: 25439823 -> 25223481 (-0.85%)
helped: 13817
HURT: 1499
helped stats (abs) min: 1 max: 1886 x̄: 26.40 x̃: 18
helped stats (rel) min: <.01% max: 38.84% x̄: 2.57% x̃: 1.97%
HURT stats (abs) min: 1 max: 3684 x̄: 98.99 x̃: 52
HURT stats (rel) min: <.01% max: 97.01% x̄: 6.37% x̃: 3.42%
95% mean confidence interval for cycles value: -15.68 -12.57
95% mean confidence interval for cycles %-change: -1.77% -1.63%
Cycles are helped.
LOST: 8
GAINED: 10
Sandy Bridge
total instructions in shared programs: 10878990 -> 10863659 (-0.14%)
instructions in affected programs: 1806702 -> 1791371 (-0.85%)
helped: 13023
HURT: 0
helped stats (abs) min: 1 max: 5 x̄: 1.18 x̃: 1
helped stats (rel) min: 0.07% max: 13.79% x̄: 1.65% x̃: 1.10%
95% mean confidence interval for instructions value: -1.18 -1.17
95% mean confidence interval for instructions %-change: -1.68% -1.62%
Instructions are helped.
total cycles in shared programs: 154082878 -> 153862810 (-0.14%)
cycles in affected programs: 20199374 -> 19979306 (-1.09%)
helped: 12048
HURT: 510
helped stats (abs) min: 1 max: 323 x̄: 20.57 x̃: 18
helped stats (rel) min: 0.03% max: 17.78% x̄: 2.05% x̃: 1.52%
HURT stats (abs) min: 1 max: 448 x̄: 54.39 x̃: 16
HURT stats (rel) min: 0.02% max: 37.98% x̄: 4.13% x̃: 1.17%
95% mean confidence interval for cycles value: -17.97 -17.08
95% mean confidence interval for cycles %-change: -1.84% -1.75%
Cycles are helped.
LOST: 1
GAINED: 0
Iron Lake
total instructions in shared programs: 8155075 -> 8142729 (-0.15%)
instructions in affected programs: 949495 -> 937149 (-1.30%)
helped: 5810
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.12 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.53% x̃: 1.85%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.59% -2.48%
Instructions are helped.
total cycles in shared programs: 188584610 -> 188549632 (-0.02%)
cycles in affected programs: 17274446 -> 17239468 (-0.20%)
helped: 3881
HURT: 90
helped stats (abs) min: 2 max: 168 x̄: 9.08 x̃: 6
helped stats (rel) min: <.01% max: 23.53% x̄: 0.83% x̃: 0.30%
HURT stats (abs) min: 2 max: 10 x̄: 2.80 x̃: 2
HURT stats (rel) min: <.01% max: 0.60% x̄: 0.10% x̃: 0.07%
95% mean confidence interval for cycles value: -9.35 -8.27
95% mean confidence interval for cycles %-change: -0.85% -0.77%
Cycles are helped.
GM45
total instructions in shared programs: 5019308 -> 5013119 (-0.12%)
instructions in affected programs: 489028 -> 482839 (-1.27%)
helped: 2912
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.13 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.46% x̃: 1.81%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.54% -2.39%
Instructions are helped.
total cycles in shared programs: 129002592 -> 128977804 (-0.02%)
cycles in affected programs: 12669152 -> 12644364 (-0.20%)
helped: 2759
HURT: 37
helped stats (abs) min: 2 max: 168 x̄: 9.03 x̃: 4
helped stats (rel) min: <.01% max: 21.43% x̄: 0.75% x̃: 0.31%
HURT stats (abs) min: 2 max: 10 x̄: 3.62 x̃: 4
HURT stats (rel) min: <.01% max: 0.41% x̄: 0.10% x̃: 0.04%
95% mean confidence interval for cycles value: -9.53 -8.20
95% mean confidence interval for cycles %-change: -0.79% -0.70%
Cycles are helped.
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2019-05-20 17:25:01 -07:00
|
|
|
#include "brw_eu.h"
|
2018-06-25 19:55:31 -07:00
|
|
|
#include "nir_search_helpers.h"
|
2018-08-21 09:46:46 -07:00
|
|
|
#include "util/u_math.h"
|
2018-11-12 18:48:10 -06:00
|
|
|
#include "util/bitscan.h"
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2015-06-03 21:12:49 +03:00
|
|
|
using namespace brw;
|
|
|
|
|
|
2015-01-19 22:11:39 -08:00
|
|
|
/**
 * Top-level entry point for translating the NIR shader into FS IR.
 *
 * Performs the per-shader setup passes (float controls, outputs, uniforms,
 * system values, scratch sizing) and then lowers the NIR entrypoint body.
 */
void
fs_visitor::emit_nir_code()
{
   /* Emit the shader-wide float-controls execution-mode setup before any
    * other generated instructions.
    */
   emit_shader_float_controls_execution_mode();

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_outputs();
   nir_setup_uniforms();
   nir_emit_system_values();

   /* Scratch is per-channel: scale the NIR-reported byte size (rounded up to
    * a multiple of 4) by the SIMD dispatch width.
    */
   last_scratch = ALIGN(nir->scratch_size, 4) * dispatch_width;

   /* Lower the body of the shader's entrypoint function. */
   nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
}
|
|
|
|
|
|
|
|
|
|
/**
 * Allocate VGRFs for the shader's output slots.
 *
 * TCS and FS outputs are handled elsewhere, so this returns early for those
 * stages.  For the remaining stages it sizes each varying slot, merges
 * overlapping ranges, and records the allocated registers in outputs[].
 */
void
fs_visitor::nir_setup_outputs()
{
   /* TCS and FS write outputs via URB/FB messages, not through outputs[]. */
   if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_FRAGMENT)
      return;

   /* Number of vec4s needed per varying slot; 0 means the slot is unused. */
   unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };

   /* Calculate the size of output registers in a separate pass, before
    * allocating them.  With ARB_enhanced_layouts, multiple output variables
    * may occupy the same slot, but have different type sizes.
    */
   nir_foreach_shader_out_variable(var, nir) {
      const int loc = var->data.driver_location;
      const unsigned var_vec4s =
         var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
                           : type_size_vec4(var->type, true);
      /* Keep the largest size seen for this slot. */
      vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
   }

   for (unsigned loc = 0; loc < ARRAY_SIZE(vec4s);) {
      if (vec4s[loc] == 0) {
         /* Unused slot; skip it. */
         loc++;
         continue;
      }

      unsigned reg_size = vec4s[loc];

      /* Check if there are any ranges that start within this range and extend
       * past it. If so, include them in this allocation.
       */
      for (unsigned i = 1; i < reg_size; i++)
         reg_size = MAX2(vec4s[i + loc] + i, reg_size);

      /* One VGRF covers the whole merged range; each slot within it aliases
       * an offset into that register.
       */
      fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_F, 4 * reg_size);
      for (unsigned i = 0; i < reg_size; i++)
         outputs[loc + i] = offset(reg, bld, 4 * i);

      loc += reg_size;
   }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Set up the uniform count and, for compute-like stages, append the built-in
 * uniforms (variable workgroup size and subgroup ID) after the NIR uniforms.
 */
void
fs_visitor::nir_setup_uniforms()
{
   /* Only the first compile gets to set up uniforms. */
   if (push_constant_loc) {
      assert(pull_constant_loc);
      return;
   }

   /* NIR counts uniforms in bytes; we count them in 32-bit components. */
   uniforms = nir->num_uniforms / 4;

   if (stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL) {
      /* Add uniforms for builtins after regular NIR uniforms. */
      assert(uniforms == prog_data->nr_params);

      uint32_t *param;
      if (nir->info.cs.local_size_variable &&
          compiler->lower_variable_group_size) {
         /* Variable workgroup size: reserve three uniforms (X, Y, Z). */
         param = brw_stage_prog_data_add_params(prog_data, 3);
         for (unsigned i = 0; i < 3; i++) {
            param[i] = (BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i);
            group_size[i] = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
         }
      }

      /* Subgroup ID must be the last uniform on the list.  This will make
       * easier later to split between cross thread and per thread
       * uniforms.
       */
      param = brw_stage_prog_data_add_params(prog_data, 1);
      *param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
      subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
   }
}
|
|
|
|
|
|
2014-12-17 12:34:27 -08:00
|
|
|
/**
 * Scan one NIR block for system-value intrinsics and lazily emit the setup
 * code for each one the first time it is seen (nir_system_values[] entry is
 * still BAD_FILE).  Returns true so it can be used as a block-walk callback.
 */
static bool
emit_system_values_block(nir_block *block, fs_visitor *v)
{
   fs_reg *reg;

   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
      case nir_intrinsic_load_base_vertex:
         unreachable("should be lowered by nir_lower_system_values().");

      case nir_intrinsic_load_vertex_id_zero_base:
      case nir_intrinsic_load_is_indexed_draw:
      case nir_intrinsic_load_first_vertex:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_base_instance:
      case nir_intrinsic_load_draw_id:
         unreachable("should be lowered by brw_nir_lower_vs_inputs().");

      case nir_intrinsic_load_invocation_id:
         /* In TCS the invocation ID comes from elsewhere; nothing to emit. */
         if (v->stage == MESA_SHADER_TESS_CTRL)
            break;
         assert(v->stage == MESA_SHADER_GEOMETRY);
         reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
         if (reg->file == BAD_FILE) {
            const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
            fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
            fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            /* Extract the invocation ID from the top bits of g1 (shift by
             * 27, i.e. keep the top 5 bits).
             */
            abld.SHR(iid, g1, brw_imm_ud(27u));
            *reg = iid;
         }
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplemaskin_setup();
         break;

      case nir_intrinsic_load_work_group_id:
         assert(v->stage == MESA_SHADER_COMPUTE ||
                v->stage == MESA_SHADER_KERNEL);
         reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_cs_work_group_id_setup();
         break;

      case nir_intrinsic_load_helper_invocation:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
         if (reg->file == BAD_FILE) {
            const fs_builder abld =
               v->bld.annotate("gl_HelperInvocation", NULL);

            /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
             * pixel mask is in g1.7 of the thread payload.
             *
             * We move the per-channel pixel enable bit to the low bit of each
             * channel by shifting the byte containing the pixel mask by the
             * vector immediate 0x76543210UV.
             *
             * The region of <1,8,0> reads only 1 byte (the pixel masks for
             * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
             * masks for 2 and 3) in SIMD16.
             */
            fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);

            for (unsigned i = 0; i < DIV_ROUND_UP(v->dispatch_width, 16); i++) {
               const fs_builder hbld = abld.group(MIN2(16, v->dispatch_width), i);
               hbld.SHR(offset(shifted, hbld, i),
                        stride(retype(brw_vec1_grf(1 + i, 7),
                                      BRW_REGISTER_TYPE_UB),
                               1, 8, 0),
                        brw_imm_v(0x76543210));
            }

            /* A set bit in the pixel mask means the channel is enabled, but
             * that is the opposite of gl_HelperInvocation so we need to invert
             * the mask.
             *
             * The negate source-modifier bit of logical instructions on Gen8+
             * performs 1's complement negation, so we can use that instead of
             * a NOT instruction.
             */
            fs_reg inverted = negate(shifted);
            if (v->devinfo->gen < 8) {
               /* Pre-Gen8: no logical source negate; emit an explicit NOT. */
               inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
               abld.NOT(inverted, shifted);
            }

            /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
             * with 1 and negating.
             */
            fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            abld.AND(anded, inverted, brw_imm_uw(1));

            fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
            abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
            *reg = dst;
         }
         break;

      default:
         /* Not a system value we handle here. */
         break;
      }
   }

   return true;
}
|
|
|
|
|
|
|
|
|
|
void
|
2015-10-01 12:23:53 -07:00
|
|
|
fs_visitor::nir_emit_system_values()
|
2014-12-17 12:34:27 -08:00
|
|
|
{
|
|
|
|
|
nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
|
2015-10-30 13:53:38 -07:00
|
|
|
for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
|
|
|
|
|
nir_system_values[i] = fs_reg();
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-31 21:56:43 -07:00
|
|
|
/* Always emit SUBGROUP_INVOCATION. Dead code will clean it up if we
|
|
|
|
|
* never end up using it.
|
|
|
|
|
*/
|
|
|
|
|
{
|
|
|
|
|
const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
|
|
|
|
|
fs_reg ® = nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
|
2018-01-05 18:26:58 -08:00
|
|
|
reg = abld.vgrf(BRW_REGISTER_TYPE_UW);
|
2017-08-31 21:56:43 -07:00
|
|
|
|
|
|
|
|
const fs_builder allbld8 = abld.group(8, 0).exec_all();
|
|
|
|
|
allbld8.MOV(reg, brw_imm_v(0x76543210));
|
|
|
|
|
if (dispatch_width > 8)
|
|
|
|
|
allbld8.ADD(byte_offset(reg, 16), reg, brw_imm_uw(8u));
|
|
|
|
|
if (dispatch_width > 16) {
|
|
|
|
|
const fs_builder allbld16 = abld.group(16, 0).exec_all();
|
|
|
|
|
allbld16.ADD(byte_offset(reg, 32), reg, brw_imm_uw(16u));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-29 12:08:29 -05:00
|
|
|
nir_function_impl *impl = nir_shader_get_entrypoint((nir_shader *)nir);
|
|
|
|
|
nir_foreach_block(block, impl)
|
|
|
|
|
emit_system_values_block(block, this);
|
2014-12-17 12:34:27 -08:00
|
|
|
}
|
|
|
|
|
|
2017-08-24 15:54:27 +02:00
|
|
|
/*
|
|
|
|
|
* Returns a type based on a reference_type (word, float, half-float) and a
|
|
|
|
|
* given bit_size.
|
|
|
|
|
*
|
|
|
|
|
* Reference BRW_REGISTER_TYPE are HF,F,DF,W,D,UW,UD.
|
|
|
|
|
*
|
|
|
|
|
* @FIXME: 64-bit return types are always DF on integer types to maintain
|
|
|
|
|
* compability with uses of DF previously to the introduction of int64
|
|
|
|
|
* support.
|
|
|
|
|
*/
|
|
|
|
|
static brw_reg_type
|
|
|
|
|
brw_reg_type_from_bit_size(const unsigned bit_size,
|
|
|
|
|
const brw_reg_type reference_type)
|
|
|
|
|
{
|
|
|
|
|
switch(reference_type) {
|
|
|
|
|
case BRW_REGISTER_TYPE_HF:
|
|
|
|
|
case BRW_REGISTER_TYPE_F:
|
|
|
|
|
case BRW_REGISTER_TYPE_DF:
|
|
|
|
|
switch(bit_size) {
|
|
|
|
|
case 16:
|
|
|
|
|
return BRW_REGISTER_TYPE_HF;
|
|
|
|
|
case 32:
|
|
|
|
|
return BRW_REGISTER_TYPE_F;
|
|
|
|
|
case 64:
|
|
|
|
|
return BRW_REGISTER_TYPE_DF;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid bit size");
|
|
|
|
|
}
|
2018-07-09 02:00:06 +02:00
|
|
|
case BRW_REGISTER_TYPE_B:
|
2017-08-24 15:54:27 +02:00
|
|
|
case BRW_REGISTER_TYPE_W:
|
|
|
|
|
case BRW_REGISTER_TYPE_D:
|
|
|
|
|
case BRW_REGISTER_TYPE_Q:
|
|
|
|
|
switch(bit_size) {
|
2018-07-09 02:00:06 +02:00
|
|
|
case 8:
|
|
|
|
|
return BRW_REGISTER_TYPE_B;
|
2017-08-24 15:54:27 +02:00
|
|
|
case 16:
|
|
|
|
|
return BRW_REGISTER_TYPE_W;
|
|
|
|
|
case 32:
|
|
|
|
|
return BRW_REGISTER_TYPE_D;
|
|
|
|
|
case 64:
|
2017-11-02 15:59:58 -07:00
|
|
|
return BRW_REGISTER_TYPE_Q;
|
2017-08-24 15:54:27 +02:00
|
|
|
default:
|
|
|
|
|
unreachable("Invalid bit size");
|
|
|
|
|
}
|
2018-07-09 02:00:06 +02:00
|
|
|
case BRW_REGISTER_TYPE_UB:
|
2017-08-24 15:54:27 +02:00
|
|
|
case BRW_REGISTER_TYPE_UW:
|
|
|
|
|
case BRW_REGISTER_TYPE_UD:
|
|
|
|
|
case BRW_REGISTER_TYPE_UQ:
|
|
|
|
|
switch(bit_size) {
|
2018-07-09 02:00:06 +02:00
|
|
|
case 8:
|
|
|
|
|
return BRW_REGISTER_TYPE_UB;
|
2017-08-24 15:54:27 +02:00
|
|
|
case 16:
|
|
|
|
|
return BRW_REGISTER_TYPE_UW;
|
|
|
|
|
case 32:
|
|
|
|
|
return BRW_REGISTER_TYPE_UD;
|
|
|
|
|
case 64:
|
2017-11-02 15:59:58 -07:00
|
|
|
return BRW_REGISTER_TYPE_UQ;
|
2017-08-24 15:54:27 +02:00
|
|
|
default:
|
|
|
|
|
unreachable("Invalid bit size");
|
|
|
|
|
}
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Unknown type");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
/**
 * Lower one NIR function implementation: allocate VGRFs for NIR registers
 * and the SSA-value table, then emit the body's control-flow list.
 */
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   /* One fs_reg slot per NIR register index; start them all as BAD_FILE. */
   nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = fs_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      /* A register with no array elements still occupies one element. */
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      /* 8-bit registers map directly to B; other sizes go through the
       * float-reference mapping helper.
       */
      const brw_reg_type reg_type = reg->bit_size == 8 ? BRW_REGISTER_TYPE_B :
         brw_reg_type_from_bit_size(reg->bit_size, BRW_REGISTER_TYPE_F);
      nir_locals[reg->index] = bld.vgrf(reg_type, size);
   }

   /* Grow (or allocate) the SSA-value table to this impl's SSA count. */
   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_cf_list(exec_list *list)
|
|
|
|
|
{
|
2015-01-21 16:00:55 -08:00
|
|
|
exec_list_validate(list);
|
2014-08-15 10:32:07 -07:00
|
|
|
foreach_list_typed(nir_cf_node, node, node, list) {
|
|
|
|
|
switch (node->type) {
|
|
|
|
|
case nir_cf_node_if:
|
|
|
|
|
nir_emit_if(nir_cf_node_as_if(node));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_cf_node_loop:
|
|
|
|
|
nir_emit_loop(nir_cf_node_as_loop(node));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_cf_node_block:
|
|
|
|
|
nir_emit_block(nir_cf_node_as_block(node));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid CFG node block");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Emit an IF/ELSE/ENDIF sequence for a NIR if-statement.
 *
 * Recognizes conditions of the form !x and folds the negation into the IF's
 * predicate-inverse bit instead of emitting an extra NOT.
 */
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   bool invert;
   fs_reg cond_reg;

   /* If the condition has the form !other_condition, use other_condition as
    * the source, but invert the predicate on the if instruction.
    */
   nir_alu_instr *cond = nir_src_as_alu_instr(if_stmt->condition);
   if (cond != NULL && cond->op == nir_op_inot) {
      invert = true;
      cond_reg = get_nir_src(cond->src[0].src);
   } else {
      invert = false;
      cond_reg = get_nir_src(if_stmt->condition);
   }

   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(cond_reg, BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL)->predicate_inverse = invert;

   nir_emit_cf_list(&if_stmt->then_list);

   /* Skip the ELSE entirely when the else-list is an empty block. */
   if (!nir_cf_list_is_empty_block(&if_stmt->else_list)) {
      bld.emit(BRW_OPCODE_ELSE);
      nir_emit_cf_list(&if_stmt->else_list);
   }

   bld.emit(BRW_OPCODE_ENDIF);

   if (devinfo->gen < 7)
      limit_dispatch_width(16, "Non-uniform control flow unsupported "
                           "in SIMD32 mode.");
}
|
|
|
|
|
|
|
|
|
|
/**
 * Emit a DO/WHILE pair around a NIR loop body.
 */
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);

   /* Same pre-Gen7 restriction as nir_emit_if: divergent control flow is
    * not supported at SIMD32 there.
    */
   if (devinfo->gen < 7)
      limit_dispatch_width(16, "Non-uniform control flow unsupported "
                           "in SIMD32 mode.");
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_block(nir_block *block)
|
|
|
|
|
{
|
2016-04-26 18:34:19 -07:00
|
|
|
nir_foreach_instr(instr, block) {
|
2014-08-15 10:32:07 -07:00
|
|
|
nir_emit_instr(instr);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Dispatch a single NIR instruction to the appropriate emitter.
 *
 * Intrinsics are further dispatched by shader stage; SSA undefs are handled
 * at use sites instead of here.
 */
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   /* Annotate emitted IR with the NIR instruction it came from. */
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr), true);
      break;

   case nir_instr_type_deref:
      unreachable("All derefs should've been lowered");
      break;

   case nir_instr_type_intrinsic:
      /* Intrinsic handling is stage-specific. */
      switch (stage) {
      case MESA_SHADER_VERTEX:
         nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_CTRL:
         nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_EVAL:
         nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_GEOMETRY:
         nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_FRAGMENT:
         nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_COMPUTE:
      case MESA_SHADER_KERNEL:
         nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      default:
         unreachable("unsupported shader stage");
      }
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      /* We create a new VGRF for undefs on every use (by handling
       * them in get_nir_src()), rather than for each definition.
       * This helps register coalescing eliminate MOVs from undef.
       */
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
|
|
|
|
|
|
2016-01-21 09:10:09 -08:00
|
|
|
/**
 * Recognizes a parent instruction of nir_op_extract_* and changes the type to
 * match instr.
 *
 * If the only source of \c instr is an extract_[ui](8|16), emit a single
 * type-converting MOV that reads the selected byte/word directly via a
 * strided register region instead of emitting the extract separately.
 *
 * \param instr   The i2f32/u2f32 conversion being emitted.
 * \param result  Destination register for the converted value.
 * \return true if the fused MOV was emitted, false if the caller must emit
 *         the conversion normally.
 */
bool
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
                                      const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       !instr->src[0].src.ssa->parent_instr)
      return false;

   if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *src0 =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
       src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
      return false;

   /* Which byte/word element to extract; src[1] of extract_* is always a
    * constant index.
    */
   unsigned element = nir_src_as_uint(src0->src[1].src);

   /* Element type to extract: 2-byte for the *16 ops, 1-byte otherwise;
    * signed for the extract_i* ops.
    */
   const brw_reg_type type = brw_int_type(
      src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
      src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);

   fs_reg op0 = get_nir_src(src0->src[0].src);
   op0.type = brw_type_for_nir_type(devinfo,
      (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
                     nir_src_bit_size(src0->src[0].src)));
   op0 = offset(op0, bld, src0->src[0].swizzle[0]);

   /* subscript() gives a strided view of the selected sub-element, so the
    * MOV both extracts and converts in one instruction.
    */
   bld.MOV(result, subscript(op0, type, element));
   return true;
}
|
|
|
|
|
|
2015-02-15 13:45:04 -08:00
|
|
|
/**
 * Try to emit bcsel(gl_FrontFacing, +/-1.0, -/+1.0) as bit arithmetic.
 *
 * Recognizes a bcsel whose condition is load_front_face and whose two other
 * sources are the constants +1.0/-1.0, and materializes the result from the
 * hardware front-facing bit in the payload instead of emitting a compare and
 * a predicated select.
 *
 * \param instr   The bcsel instruction.
 * \param result  Destination register.
 * \return true if the optimized sequence was emitted, false if the caller
 *         must emit the bcsel normally.
 */
bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   nir_intrinsic_instr *src0 = nir_src_as_intrinsic(instr->src[0].src);
   if (src0 == NULL || src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   /* Both select sources must be compile-time constants. */
   if (!nir_src_is_const(instr->src[1].src) ||
       !nir_src_is_const(instr->src[2].src))
      return false;

   const float value1 = nir_src_as_float(instr->src[1].src);
   const float value2 = nir_src_as_float(instr->src[2].src);
   if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
      return false;

   /* nir_opt_algebraic should have gotten rid of bcsel(b, a, a) */
   assert(value1 == -value2);

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 12) {
      /* Bit 15 of g1.1 is 0 if the polygon is front facing. */
      fs_reg g1 = fs_reg(retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate the result for (gl_FrontFacing ? -1.0 : 1.0).
       */
      bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
             g1, brw_imm_uw(0x3f80));

      if (value1 == -1.0f)
         bld.MOV(tmp, negate(tmp));

   } else if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1 == -1.0f) {
         g0.negate = true;
      }

      bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
             g0, brw_imm_uw(0x3f80));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1 == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
   }
   /* Keep only sign bit | 1.0: produces +1.0f or -1.0f depending on the
    * facing bit ORed in above.
    */
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));

   return true;
}
|
|
|
|
|
|
2016-06-21 15:14:03 -07:00
|
|
|
/**
 * Emit findMSB() as LZD (leading-zero-detect) plus fixup arithmetic.
 *
 * \param bld        Instruction builder.
 * \param result     Destination register.
 * \param src        Source value.
 * \param is_signed  True for findMSB(int), which needs the conditional
 *                   logical-not described below.
 */
static void
emit_find_msb_using_lzd(const fs_builder &bld,
                        const fs_reg &result,
                        const fs_reg &src,
                        bool is_signed)
{
   fs_inst *inst;
   fs_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *    For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = bld.vgrf(BRW_REGISTER_TYPE_D);

      /* ASR by 31 yields all-ones for negative values, zero otherwise;
       * XOR with the source then applies ~src only when src is negative.
       */
      bld.ASR(temp, src, brw_imm_d(31));
      bld.XOR(temp, temp, src);
   }

   bld.LZD(retype(result, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB
    * count into an LSB count.  If no bits are set, LZD will return 32.
    * 31-32 = -1, which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(result, retype(result, BRW_REGISTER_TYPE_D), brw_imm_d(31));
   inst->src[0].negate = true;
}
|
|
|
|
|
|
2017-07-01 08:14:09 +02:00
|
|
|
/**
 * Map an explicit-rounding f2f16 conversion opcode to the hardware rounding
 * mode it requires.  Any other opcode is a caller bug.
 */
static brw_rnd_mode
brw_rnd_mode_from_nir_op (const nir_op op) {
   if (op == nir_op_f2f16_rtz)
      return BRW_RND_MODE_RTZ;

   if (op == nir_op_f2f16_rtne)
      return BRW_RND_MODE_RTNE;

   unreachable("Operation doesn't support rounding mode");
}
|
|
|
|
|
|
2019-02-12 16:13:59 +01:00
|
|
|
/**
 * Derive the hardware rounding mode from a shader's float-controls execution
 * mode bits.  RTNE takes priority when both modes are requested; returns
 * BRW_RND_MODE_UNSPECIFIED when neither is.
 */
static brw_rnd_mode
brw_rnd_mode_from_execution_mode(unsigned execution_mode)
{
   return nir_has_any_rounding_mode_rtne(execution_mode) ? BRW_RND_MODE_RTNE :
          nir_has_any_rounding_mode_rtz(execution_mode) ? BRW_RND_MODE_RTZ :
          BRW_RND_MODE_UNSPECIFIED;
}
|
|
|
|
|
|
2018-12-05 11:35:37 -08:00
|
|
|
/**
 * Resolve the destination and source registers for a NIR ALU instruction.
 *
 * Retypes the destination and each source according to nir_op_infos, and for
 * scalarized (single-channel) operations applies the write-mask channel
 * offset and per-source swizzles so the caller can emit the instruction
 * directly.
 *
 * \param bld        Instruction builder.
 * \param instr      The ALU instruction.
 * \param op         Out-array receiving the resolved sources.
 * \param need_dest  False when only the sources are wanted (e.g. when a
 *                   caller is folding this instruction into another); the
 *                   returned destination is then a null register.
 * \return the resolved (possibly channel-offset) destination register.
 */
fs_reg
fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld,
                                                nir_alu_instr *instr,
                                                fs_reg *op,
                                                bool need_dest)
{
   fs_reg result =
      need_dest ? get_nir_dest(instr->dest.dest) : bld.null_reg_ud();

   result.type = brw_type_for_nir_type(devinfo,
      (nir_alu_type)(nir_op_infos[instr->op].output_type |
                     nir_dest_bit_size(instr->dest.dest)));

   assert(!instr->dest.saturate);

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* We don't lower to source modifiers so they should not exist. */
      assert(!instr->src[i].abs);
      assert(!instr->src[i].negate);

      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(devinfo,
         (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
                        nir_src_bit_size(instr->src[i].src)));
   }

   /* Move and vecN instructions may still be vectored.  Return the raw,
    * vectored source and destination so that fs_visitor::nir_emit_alu can
    * handle it.  Other callers should not have to handle these kinds of
    * instructions.
    */
   switch (instr->op) {
   case nir_op_mov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      return result;
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(util_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   return result;
}
|
|
|
|
|
|
2017-02-09 15:20:04 +00:00
|
|
|
/**
 * Fold nir_op_inot sources of a two-source logical instruction into source
 * negate modifiers.
 *
 * For each of the two sources: if it comes from an inot, read the inot's own
 * source instead and mark it negated (on logical ops the negate modifier is
 * a bitwise NOT); otherwise resolve any existing source modifiers so the
 * negate bit is free for the hardware op.
 */
void
fs_visitor::resolve_inot_sources(const fs_builder &bld, nir_alu_instr *instr,
                                 fs_reg *op)
{
   for (unsigned i = 0; i < 2; i++) {
      nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[i].src);

      if (inot_instr != NULL && inot_instr->op == nir_op_inot) {
         /* The source of the inot is now the source of instr. */
         prepare_alu_destination_and_sources(bld, inot_instr, &op[i], false);

         assert(!op[i].negate);
         op[i].negate = true;
      } else {
         op[i] = resolve_source_modifiers(op[i]);
      }
   }
}
|
|
|
|
|
|
2018-12-03 15:53:36 -08:00
|
|
|
/**
 * Try to emit b2f(inot(a)) / b2i(inot(a)) as a single ADD.
 *
 * Only applies on gen6..11 — presumably the trick relies on those
 * generations' implicit type-conversion behavior of the ADD destination;
 * TODO confirm against hardware docs.
 *
 * \return true if the fused ADD was emitted, false if the caller must emit
 *         the conversion normally.
 */
bool
fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld,
                                  fs_reg result,
                                  nir_alu_instr *instr)
{
   if (devinfo->gen < 6 || devinfo->gen >= 12)
      return false;

   nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[0].src);

   if (inot_instr == NULL || inot_instr->op != nir_op_inot)
      return false;

   /* HF is also possible as a destination on BDW+.  For nir_op_b2i, the set
    * of valid size-changing combinations is a bit more complex.
    *
    * The source restriction is just because I was lazy about generating the
    * constant below.
    */
   if (nir_dest_bit_size(instr->dest.dest) != 32 ||
       nir_src_bit_size(inot_instr->src[0].src) != 32)
      return false;

   /* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0.  Since a can only be 0 or -1,
    * this is float(1 + a).
    */
   fs_reg op;

   prepare_alu_destination_and_sources(bld, inot_instr, &op, false);

   /* Ignore the saturate modifier, if there is one.  The result of the
    * arithmetic can only be 0 or 1, so the clamping will do nothing anyway.
    */
   bld.ADD(result, op, brw_imm_d(1));

   return true;
}
|
|
|
|
|
|
2018-06-25 19:50:56 -07:00
|
|
|
/**
 * Emit code for nir_op_fsign possibly fused with a nir_op_fmul
 *
 * If \c instr is not the \c nir_op_fsign, then \c fsign_src is the index of
 * the source of \c instr that is a \c nir_op_fsign.
 *
 * The emitted sequence builds the result by bit manipulation: AND extracts
 * the sign bit of the fsign source, and a predicated OR (predicated on the
 * source being non-zero) merges in the bit pattern of 1.0 for the element
 * type, so zero inputs yield zero.  In the fused-multiply case the OR of 1.0
 * is replaced by an XOR with the other multiply operand, which applies the
 * extracted sign directly to it.
 */
void
fs_visitor::emit_fsign(const fs_builder &bld, const nir_alu_instr *instr,
                       fs_reg result, fs_reg *op, unsigned fsign_src)
{
   fs_inst *inst;

   assert(instr->op == nir_op_fsign || instr->op == nir_op_fmul);
   assert(fsign_src < nir_op_infos[instr->op].num_inputs);

   if (instr->op != nir_op_fsign) {
      const nir_alu_instr *const fsign_instr =
         nir_src_as_alu_instr(instr->src[fsign_src].src);

      /* op[fsign_src] has the nominal result of the fsign, and op[1 -
       * fsign_src] has the other multiply source.  This must be rearranged so
       * that op[0] is the source of the fsign op[1] is the other multiply
       * source.
       */
      if (fsign_src != 0)
         op[1] = op[0];

      op[0] = get_nir_src(fsign_instr->src[0].src);

      const nir_alu_type t =
         (nir_alu_type)(nir_op_infos[instr->op].input_types[0] |
                        nir_src_bit_size(fsign_instr->src[0].src));

      op[0].type = brw_type_for_nir_type(devinfo, t);

      unsigned channel = 0;
      if (nir_op_infos[instr->op].output_size == 0) {
         /* Since NIR is doing the scalarizing for us, we should only ever see
          * vectorized operations with a single channel.
          */
         assert(util_bitcount(instr->dest.write_mask) == 1);
         channel = ffs(instr->dest.write_mask) - 1;
      }

      op[0] = offset(op[0], bld, fsign_instr->src[0].swizzle[channel]);
   }

   if (type_sz(op[0].type) == 2) {
      /* AND(val, 0x8000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3c00) with the sign bit if val is not zero.
       */
      fs_reg zero = retype(brw_imm_uw(0), BRW_REGISTER_TYPE_HF);
      bld.CMP(bld.null_reg_f(), op[0], zero, BRW_CONDITIONAL_NZ);

      op[0].type = BRW_REGISTER_TYPE_UW;
      result.type = BRW_REGISTER_TYPE_UW;
      bld.AND(result, op[0], brw_imm_uw(0x8000u));

      if (instr->op == nir_op_fsign)
         inst = bld.OR(result, result, brw_imm_uw(0x3c00u));
      else {
         /* Use XOR here to get the result sign correct. */
         inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UW));
      }

      inst->predicate = BRW_PREDICATE_NORMAL;
   } else if (type_sz(op[0].type) == 4) {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);

      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      bld.AND(result, op[0], brw_imm_ud(0x80000000u));

      if (instr->op == nir_op_fsign)
         inst = bld.OR(result, result, brw_imm_ud(0x3f800000u));
      else {
         /* Use XOR here to get the result sign correct. */
         inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UD));
      }

      inst->predicate = BRW_PREDICATE_NORMAL;
   } else {
      /* For doubles we do the same but we need to consider:
       *
       * - 2-src instructions can't operate with 64-bit immediates
       * - The sign is encoded in the high 32-bit of each DF
       * - We need to produce a DF result.
       */

      fs_reg zero = vgrf(glsl_type::double_type);
      bld.MOV(zero, setup_imm_df(bld, 0.0));
      bld.CMP(bld.null_reg_df(), op[0], zero, BRW_CONDITIONAL_NZ);

      bld.MOV(result, zero);

      /* Operate on the high dword of each DF element, where the sign bit
       * (and for fsign, the 1.0 exponent pattern 0x3ff00000) lives.
       */
      fs_reg r = subscript(result, BRW_REGISTER_TYPE_UD, 1);
      bld.AND(r, subscript(op[0], BRW_REGISTER_TYPE_UD, 1),
              brw_imm_ud(0x80000000u));

      if (instr->op == nir_op_fsign) {
         set_predicate(BRW_PREDICATE_NORMAL,
                       bld.OR(r, r, brw_imm_ud(0x3ff00000u)));
      } else {
         /* This could be done better in some cases.  If the scale is an
          * immediate with the low 32-bits all 0, emitting a separate XOR and
          * OR would allow an algebraic optimization to remove the OR.  There
          * are currently zero instances of fsign(double(x))*IMM in shader-db
          * or any test suite, so it is hard to care at this time.
          */
         fs_reg result_int64 = retype(result, BRW_REGISTER_TYPE_UQ);
         inst = bld.XOR(result_int64, result_int64,
                        retype(op[1], BRW_REGISTER_TYPE_UQ));
      }
   }
}
|
|
|
|
|
|
2018-06-25 19:55:31 -07:00
|
|
|
/**
 * Determine whether sources of a nir_op_fmul can be fused with a nir_op_fsign
 *
 * Checks the operands of a \c nir_op_fmul to determine whether or not
 * \c emit_fsign could fuse the multiplication with the \c sign() calculation.
 *
 * \param instr  The multiplication instruction
 *
 * \param fsign_src  The source of \c instr that may or may not be a
 *                   \c nir_op_fsign
 */
static bool
can_fuse_fmul_fsign(nir_alu_instr *instr, unsigned fsign_src)
{
   assert(instr->op == nir_op_fmul);

   nir_alu_instr *const fsign_instr =
      nir_src_as_alu_instr(instr->src[fsign_src].src);

   /* Rules:
    *
    * 1. instr->src[fsign_src] must be a nir_op_fsign.
    * 2. The nir_op_fsign can only be used by this multiplication.
    * 3. The source that is the nir_op_fsign does not have source modifiers.
    *    \c emit_fsign only examines the source modifiers of the source of the
    *    \c nir_op_fsign.
    *
    * The nir_op_fsign must also not have the saturate modifier, but steps
    * have already been taken (in nir_opt_algebraic) to ensure that.
    */
   return fsign_instr != NULL && fsign_instr->op == nir_op_fsign &&
          is_used_once(fsign_instr);
}
|
|
|
|
|
|
2018-12-05 11:35:37 -08:00
|
|
|
void
|
2019-05-21 12:09:42 -07:00
|
|
|
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
|
|
|
|
|
bool need_dest)
|
2018-12-05 11:35:37 -08:00
|
|
|
{
|
|
|
|
|
struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
|
|
|
|
|
fs_inst *inst;
|
2019-02-12 16:13:59 +01:00
|
|
|
unsigned execution_mode =
|
|
|
|
|
bld.shader->nir->info.float_controls_execution_mode;
|
2018-12-05 11:35:37 -08:00
|
|
|
|
|
|
|
|
fs_reg op[4];
|
2019-05-21 12:09:42 -07:00
|
|
|
fs_reg result = prepare_alu_destination_and_sources(bld, instr, op, need_dest);
|
2018-12-05 11:35:37 -08:00
|
|
|
|
2015-01-21 16:00:55 -08:00
|
|
|
switch (instr->op) {
|
2019-05-06 11:45:46 -05:00
|
|
|
case nir_op_mov:
|
2015-01-21 16:00:55 -08:00
|
|
|
case nir_op_vec2:
|
|
|
|
|
case nir_op_vec3:
|
|
|
|
|
case nir_op_vec4: {
|
|
|
|
|
fs_reg temp = result;
|
|
|
|
|
bool need_extra_copy = false;
|
|
|
|
|
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
|
|
|
|
|
if (!instr->src[i].src.is_ssa &&
|
|
|
|
|
instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
|
|
|
|
|
need_extra_copy = true;
|
2015-06-03 20:59:26 +03:00
|
|
|
temp = bld.vgrf(result.type, 4);
|
2015-01-21 16:00:55 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < 4; i++) {
|
|
|
|
|
if (!(instr->dest.write_mask & (1 << i)))
|
|
|
|
|
continue;
|
|
|
|
|
|
2019-05-06 11:45:46 -05:00
|
|
|
if (instr->op == nir_op_mov) {
|
2015-06-18 12:07:27 -07:00
|
|
|
inst = bld.MOV(offset(temp, bld, i),
|
|
|
|
|
offset(op[0], bld, instr->src[0].swizzle[i]));
|
2015-01-21 16:00:55 -08:00
|
|
|
} else {
|
2015-06-18 12:07:27 -07:00
|
|
|
inst = bld.MOV(offset(temp, bld, i),
|
|
|
|
|
offset(op[i], bld, instr->src[i].swizzle[0]));
|
2015-01-21 16:00:55 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* In this case the source and destination registers were the same,
|
|
|
|
|
* so we need to insert an extra set of moves in order to deal with
|
|
|
|
|
* any swizzling.
|
|
|
|
|
*/
|
|
|
|
|
if (need_extra_copy) {
|
|
|
|
|
for (unsigned i = 0; i < 4; i++) {
|
|
|
|
|
if (!(instr->dest.write_mask & (1 << i)))
|
|
|
|
|
continue;
|
|
|
|
|
|
2015-06-18 12:07:27 -07:00
|
|
|
bld.MOV(offset(result, bld, i), offset(temp, bld, i));
|
2015-01-21 16:00:55 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
2014-12-23 14:44:19 -08:00
|
|
|
|
2017-03-07 19:54:37 -08:00
|
|
|
case nir_op_i2f32:
|
|
|
|
|
case nir_op_u2f32:
|
2016-01-21 09:10:09 -08:00
|
|
|
if (optimize_extract_to_float(instr, result))
|
|
|
|
|
return;
|
2016-06-13 03:13:23 -04:00
|
|
|
inst = bld.MOV(result, op[0]);
|
|
|
|
|
break;
|
2016-01-21 09:10:09 -08:00
|
|
|
|
2017-07-01 08:14:09 +02:00
|
|
|
case nir_op_f2f16_rtne:
|
|
|
|
|
case nir_op_f2f16_rtz:
|
2019-02-13 10:42:05 +01:00
|
|
|
case nir_op_f2f16: {
|
|
|
|
|
brw_rnd_mode rnd = BRW_RND_MODE_UNSPECIFIED;
|
|
|
|
|
|
|
|
|
|
if (nir_op_f2f16 == instr->op)
|
|
|
|
|
rnd = brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
else
|
|
|
|
|
rnd = brw_rnd_mode_from_nir_op(instr->op);
|
|
|
|
|
|
|
|
|
|
if (BRW_RND_MODE_UNSPECIFIED != rnd)
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(), brw_imm_d(rnd));
|
|
|
|
|
|
2017-07-01 08:11:05 +02:00
|
|
|
/* In theory, it would be better to use BRW_OPCODE_F32TO16. Depending
|
|
|
|
|
* on the HW gen, it is a special hw opcode or just a MOV, and
|
|
|
|
|
* brw_F32TO16 (at brw_eu_emit) would do the work to chose.
|
|
|
|
|
*
|
|
|
|
|
* But if we want to use that opcode, we need to provide support on
|
|
|
|
|
* different optimizations and lowerings. As right now HF support is
|
|
|
|
|
* only for gen8+, it will be better to use directly the MOV, and use
|
|
|
|
|
* BRW_OPCODE_F32TO16 when/if we work for HF support on gen7.
|
|
|
|
|
*/
|
2018-12-18 09:27:21 +01:00
|
|
|
assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
|
2018-05-04 11:33:07 +02:00
|
|
|
inst = bld.MOV(result, op[0]);
|
2017-07-01 08:11:05 +02:00
|
|
|
break;
|
2019-02-13 10:42:05 +01:00
|
|
|
}
|
2017-07-01 08:11:05 +02:00
|
|
|
|
2018-11-07 13:43:40 -06:00
|
|
|
case nir_op_b2i8:
|
|
|
|
|
case nir_op_b2i16:
|
|
|
|
|
case nir_op_b2i32:
|
|
|
|
|
case nir_op_b2i64:
|
|
|
|
|
case nir_op_b2f16:
|
|
|
|
|
case nir_op_b2f32:
|
|
|
|
|
case nir_op_b2f64:
|
2018-12-03 15:53:36 -08:00
|
|
|
if (try_emit_b2fi_of_inot(bld, result, instr))
|
|
|
|
|
break;
|
2018-10-10 15:17:11 -07:00
|
|
|
op[0].type = BRW_REGISTER_TYPE_D;
|
|
|
|
|
op[0].negate = !op[0].negate;
|
|
|
|
|
/* fallthrough */
|
2017-03-07 19:54:37 -08:00
|
|
|
case nir_op_i2f64:
|
2017-11-08 15:14:19 -08:00
|
|
|
case nir_op_i2i64:
|
2017-03-07 19:54:37 -08:00
|
|
|
case nir_op_u2f64:
|
2017-11-08 15:14:19 -08:00
|
|
|
case nir_op_u2u64:
|
2018-07-17 09:02:27 +02:00
|
|
|
case nir_op_f2f64:
|
|
|
|
|
case nir_op_f2i64:
|
|
|
|
|
case nir_op_f2u64:
|
|
|
|
|
case nir_op_i2i32:
|
|
|
|
|
case nir_op_u2u32:
|
2017-03-07 19:54:37 -08:00
|
|
|
case nir_op_f2i32:
|
|
|
|
|
case nir_op_f2u32:
|
2018-07-17 09:02:27 +02:00
|
|
|
case nir_op_i2f16:
|
2018-05-04 11:33:07 +02:00
|
|
|
case nir_op_i2i16:
|
2018-07-17 09:02:27 +02:00
|
|
|
case nir_op_u2f16:
|
2018-05-04 11:33:07 +02:00
|
|
|
case nir_op_u2u16:
|
2018-07-17 09:02:27 +02:00
|
|
|
case nir_op_f2i16:
|
|
|
|
|
case nir_op_f2u16:
|
2018-07-09 02:00:23 +02:00
|
|
|
case nir_op_i2i8:
|
|
|
|
|
case nir_op_u2u8:
|
2018-07-17 09:02:27 +02:00
|
|
|
case nir_op_f2i8:
|
|
|
|
|
case nir_op_f2u8:
|
|
|
|
|
if (result.type == BRW_REGISTER_TYPE_B ||
|
|
|
|
|
result.type == BRW_REGISTER_TYPE_UB ||
|
|
|
|
|
result.type == BRW_REGISTER_TYPE_HF)
|
|
|
|
|
assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
|
|
|
|
|
|
|
|
|
|
if (op[0].type == BRW_REGISTER_TYPE_B ||
|
|
|
|
|
op[0].type == BRW_REGISTER_TYPE_UB ||
|
|
|
|
|
op[0].type == BRW_REGISTER_TYPE_HF)
|
|
|
|
|
assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
|
|
|
|
|
|
2017-03-07 19:32:50 -08:00
|
|
|
inst = bld.MOV(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2019-05-06 11:16:25 -05:00
|
|
|
case nir_op_fsat:
|
|
|
|
|
inst = bld.MOV(result, op[0]);
|
|
|
|
|
inst->saturate = true;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fneg:
|
|
|
|
|
case nir_op_ineg:
|
|
|
|
|
op[0].negate = true;
|
|
|
|
|
inst = bld.MOV(result, op[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fabs:
|
|
|
|
|
case nir_op_iabs:
|
|
|
|
|
op[0].negate = false;
|
|
|
|
|
op[0].abs = true;
|
|
|
|
|
inst = bld.MOV(result, op[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2019-02-13 10:42:05 +01:00
|
|
|
case nir_op_f2f32:
|
|
|
|
|
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
|
|
|
|
|
brw_rnd_mode rnd =
|
|
|
|
|
brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
|
|
|
|
|
brw_imm_d(rnd));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (op[0].type == BRW_REGISTER_TYPE_HF)
|
|
|
|
|
assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
|
|
|
|
|
|
|
|
|
|
inst = bld.MOV(result, op[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2018-06-25 19:50:56 -07:00
|
|
|
case nir_op_fsign:
|
2018-06-25 19:53:38 -07:00
|
|
|
emit_fsign(bld, instr, result, op, 0);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_frcp:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fexp2:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_flog2:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fsin:
|
2016-04-07 15:04:35 -07:00
|
|
|
inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fcos:
|
2016-04-07 15:04:35 -07:00
|
|
|
inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fddx:
|
2014-12-23 14:44:19 -08:00
|
|
|
if (fs_key->high_quality_derivatives) {
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
|
2014-12-23 14:44:19 -08:00
|
|
|
} else {
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
|
2014-12-23 14:44:19 -08:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2014-10-15 14:44:00 -07:00
|
|
|
case nir_op_fddx_fine:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
|
2014-10-15 14:44:00 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_fddx_coarse:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
|
2014-10-15 14:44:00 -07:00
|
|
|
break;
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_fddy:
|
2014-12-23 14:44:19 -08:00
|
|
|
if (fs_key->high_quality_derivatives) {
|
2016-05-17 01:52:16 -07:00
|
|
|
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
|
2014-12-23 14:44:19 -08:00
|
|
|
} else {
|
2016-05-17 01:52:16 -07:00
|
|
|
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
|
2014-12-23 14:44:19 -08:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2014-10-15 14:44:00 -07:00
|
|
|
case nir_op_fddy_fine:
|
2016-05-17 01:52:16 -07:00
|
|
|
inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
|
2014-10-15 14:44:00 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_fddy_coarse:
|
2016-05-17 01:52:16 -07:00
|
|
|
inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
|
2014-10-15 14:44:00 -07:00
|
|
|
break;
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2015-11-10 10:18:55 +01:00
|
|
|
case nir_op_fadd:
|
2019-02-12 16:13:59 +01:00
|
|
|
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
|
|
|
|
|
brw_rnd_mode rnd =
|
|
|
|
|
brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
|
|
|
|
|
brw_imm_d(rnd));
|
|
|
|
|
}
|
|
|
|
|
/* fallthrough */
|
|
|
|
|
case nir_op_iadd:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.ADD(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2018-09-11 16:49:51 -07:00
|
|
|
case nir_op_iadd_sat:
|
2018-10-05 21:04:47 -05:00
|
|
|
case nir_op_uadd_sat:
|
|
|
|
|
inst = bld.ADD(result, op[0], op[1]);
|
2020-04-21 16:06:54 -07:00
|
|
|
inst->saturate = true;
|
2018-10-05 21:04:47 -05:00
|
|
|
break;
|
|
|
|
|
|
2018-09-11 16:49:51 -07:00
|
|
|
case nir_op_isub_sat:
|
|
|
|
|
bld.emit(SHADER_OPCODE_ISUB_SAT, result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_usub_sat:
|
|
|
|
|
bld.emit(SHADER_OPCODE_USUB_SAT, result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_irhadd:
|
|
|
|
|
case nir_op_urhadd:
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
|
|
|
|
inst = bld.AVG(result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_ihadd:
|
|
|
|
|
case nir_op_uhadd: {
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
|
|
|
|
fs_reg tmp = bld.vgrf(result.type);
|
|
|
|
|
|
|
|
|
|
if (devinfo->gen >= 8) {
|
|
|
|
|
op[0] = resolve_source_modifiers(op[0]);
|
|
|
|
|
op[1] = resolve_source_modifiers(op[1]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* AVG(x, y) - ((x ^ y) & 1) */
|
|
|
|
|
bld.XOR(tmp, op[0], op[1]);
|
|
|
|
|
bld.AND(tmp, tmp, retype(brw_imm_ud(1), result.type));
|
|
|
|
|
bld.AVG(result, op[0], op[1]);
|
|
|
|
|
inst = bld.ADD(result, result, tmp);
|
|
|
|
|
inst->src[1].negate = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_fmul:
|
2018-06-25 19:55:31 -07:00
|
|
|
for (unsigned i = 0; i < 2; i++) {
|
|
|
|
|
if (can_fuse_fmul_fsign(instr, i)) {
|
|
|
|
|
emit_fsign(bld, instr, result, op, i);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-24 01:16:11 +03:00
|
|
|
/* We emit the rounding mode after the previous fsign optimization since
|
|
|
|
|
* it won't result in a MUL, but will try to negate the value by other
|
|
|
|
|
* means.
|
|
|
|
|
*/
|
2019-02-12 16:13:59 +01:00
|
|
|
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
|
|
|
|
|
brw_rnd_mode rnd =
|
|
|
|
|
brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
|
|
|
|
|
brw_imm_d(rnd));
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.MUL(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2019-02-14 23:08:39 -08:00
|
|
|
case nir_op_imul_2x32_64:
|
|
|
|
|
case nir_op_umul_2x32_64:
|
|
|
|
|
bld.MUL(result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
2018-09-11 16:49:51 -07:00
|
|
|
case nir_op_imul_32x16:
|
|
|
|
|
case nir_op_umul_32x16: {
|
|
|
|
|
const bool ud = instr->op == nir_op_umul_32x16;
|
|
|
|
|
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) == 32);
|
|
|
|
|
|
|
|
|
|
/* Before Gen7, the order of the 32-bit source and the 16-bit source was
|
|
|
|
|
* swapped. The extension isn't enabled on those platforms, so don't
|
|
|
|
|
* pretend to support the differences.
|
|
|
|
|
*/
|
|
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
|
|
|
|
if (op[1].file == IMM)
|
|
|
|
|
op[1] = ud ? brw_imm_uw(op[1].ud) : brw_imm_w(op[1].d);
|
|
|
|
|
else {
|
|
|
|
|
const enum brw_reg_type word_type =
|
|
|
|
|
ud ? BRW_REGISTER_TYPE_UW : BRW_REGISTER_TYPE_W;
|
|
|
|
|
|
|
|
|
|
op[1] = subscript(op[1], word_type, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const enum brw_reg_type dword_type =
|
|
|
|
|
ud ? BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_D;
|
|
|
|
|
|
|
|
|
|
bld.MUL(result, retype(op[0], dword_type), op[1]);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-11 09:29:56 -07:00
|
|
|
case nir_op_imul:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.MUL(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_imul_high:
|
2015-08-04 19:08:45 +03:00
|
|
|
case nir_op_umul_high:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-08-04 19:08:45 +03:00
|
|
|
bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_idiv:
|
|
|
|
|
case nir_op_udiv:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2015-07-09 21:42:28 +03:00
|
|
|
case nir_op_uadd_carry:
|
|
|
|
|
unreachable("Should have been lowered by carry_to_arith().");
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2015-07-09 21:42:28 +03:00
|
|
|
case nir_op_usub_borrow:
|
|
|
|
|
unreachable("Should have been lowered by borrow_to_arith().");
|
2014-08-15 10:32:07 -07:00
|
|
|
|
|
|
|
|
case nir_op_umod:
|
2016-03-25 11:17:53 -07:00
|
|
|
case nir_op_irem:
|
|
|
|
|
/* According to the sign table for INT DIV in the Ivy Bridge PRM, it
|
|
|
|
|
* appears that our hardware just does the right thing for signed
|
|
|
|
|
* remainder.
|
|
|
|
|
*/
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2016-03-25 11:17:53 -07:00
|
|
|
case nir_op_imod: {
|
|
|
|
|
/* Get a regular C-style remainder. If a % b == 0, set the predicate. */
|
|
|
|
|
bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
|
|
|
|
|
|
|
|
|
|
/* Math instructions don't support conditional mod */
|
|
|
|
|
inst = bld.MOV(bld.null_reg_d(), result);
|
|
|
|
|
inst->conditional_mod = BRW_CONDITIONAL_NZ;
|
|
|
|
|
|
|
|
|
|
/* Now, we need to determine if signs of the sources are different.
|
|
|
|
|
* When we XOR the sources, the top bit is 0 if they are the same and 1
|
|
|
|
|
* if they are different. We can then use a conditional modifier to
|
|
|
|
|
* turn that into a predicate. This leads us to an XOR.l instruction.
|
|
|
|
|
*
|
|
|
|
|
* Technically, according to the PRM, you're not allowed to use .l on a
|
|
|
|
|
* XOR instruction. However, emperical experiments and Curro's reading
|
|
|
|
|
* of the simulator source both indicate that it's safe.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
|
|
|
|
|
inst = bld.XOR(tmp, op[0], op[1]);
|
|
|
|
|
inst->predicate = BRW_PREDICATE_NORMAL;
|
|
|
|
|
inst->conditional_mod = BRW_CONDITIONAL_L;
|
|
|
|
|
|
|
|
|
|
/* If the result of the initial remainder operation is non-zero and the
|
|
|
|
|
* two sources have different signs, add in a copy of op[1] to get the
|
|
|
|
|
* final integer modulus value.
|
|
|
|
|
*/
|
|
|
|
|
inst = bld.ADD(result, result, op[1]);
|
|
|
|
|
inst->predicate = BRW_PREDICATE_NORMAL;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-18 11:44:38 -05:00
|
|
|
case nir_op_flt32:
|
|
|
|
|
case nir_op_fge32:
|
|
|
|
|
case nir_op_feq32:
|
2020-08-18 19:51:57 +02:00
|
|
|
case nir_op_fneu32: {
|
2015-08-03 18:08:58 -07:00
|
|
|
fs_reg dest = result;
|
2018-04-19 10:06:43 +02:00
|
|
|
|
|
|
|
|
const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
|
|
|
|
|
if (bit_size != 32)
|
|
|
|
|
dest = bld.vgrf(op[0].type, 1);
|
|
|
|
|
|
2019-08-02 15:19:16 -05:00
|
|
|
bld.CMP(dest, op[0], op[1], brw_cmod_for_nir_comparison(instr->op));
|
2018-04-19 10:06:43 +02:00
|
|
|
|
|
|
|
|
if (bit_size > 32) {
|
2015-08-03 18:08:58 -07:00
|
|
|
bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
|
2018-04-19 10:06:43 +02:00
|
|
|
} else if(bit_size < 32) {
|
|
|
|
|
/* When we convert the result to 32-bit we need to be careful and do
|
|
|
|
|
* it as a signed conversion to get sign extension (for 32-bit true)
|
|
|
|
|
*/
|
|
|
|
|
const brw_reg_type src_type =
|
|
|
|
|
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
|
2015-08-03 18:08:58 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-18 11:44:38 -05:00
|
|
|
case nir_op_ilt32:
|
|
|
|
|
case nir_op_ult32:
|
|
|
|
|
case nir_op_ige32:
|
|
|
|
|
case nir_op_uge32:
|
|
|
|
|
case nir_op_ieq32:
|
|
|
|
|
case nir_op_ine32: {
|
2016-10-24 20:24:56 -07:00
|
|
|
fs_reg dest = result;
|
2018-04-19 10:06:43 +02:00
|
|
|
|
2019-06-19 05:09:35 -07:00
|
|
|
/* On Gen11 we have an additional issue being that src1 cannot be a byte
|
|
|
|
|
* type. So we convert both operands for the comparison.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg temp_op[2];
|
|
|
|
|
temp_op[0] = bld.fix_byte_src(op[0]);
|
|
|
|
|
temp_op[1] = bld.fix_byte_src(op[1]);
|
|
|
|
|
|
2019-11-11 16:49:15 -08:00
|
|
|
const uint32_t bit_size = type_sz(temp_op[0].type) * 8;
|
2018-04-19 10:06:43 +02:00
|
|
|
if (bit_size != 32)
|
2019-06-19 05:09:35 -07:00
|
|
|
dest = bld.vgrf(temp_op[0].type, 1);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2019-08-02 15:19:16 -05:00
|
|
|
bld.CMP(dest, temp_op[0], temp_op[1],
|
|
|
|
|
brw_cmod_for_nir_comparison(instr->op));
|
2018-04-19 10:06:43 +02:00
|
|
|
|
|
|
|
|
if (bit_size > 32) {
|
2016-10-24 20:24:56 -07:00
|
|
|
bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
|
2018-04-19 10:06:43 +02:00
|
|
|
} else if (bit_size < 32) {
|
|
|
|
|
/* When we convert the result to 32-bit we need to be careful and do
|
|
|
|
|
* it as a signed conversion to get sign extension (for 32-bit true)
|
|
|
|
|
*/
|
|
|
|
|
const brw_reg_type src_type =
|
|
|
|
|
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
|
2016-10-24 20:24:56 -07:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2016-10-24 20:24:56 -07:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
|
|
|
|
|
case nir_op_inot:
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen >= 8) {
|
2019-04-17 17:10:18 -05:00
|
|
|
nir_alu_instr *inot_src_instr = nir_src_as_alu_instr(instr->src[0].src);
|
2017-02-09 15:21:47 +00:00
|
|
|
|
|
|
|
|
if (inot_src_instr != NULL &&
|
|
|
|
|
(inot_src_instr->op == nir_op_ior ||
|
|
|
|
|
inot_src_instr->op == nir_op_ixor ||
|
intel/compiler: Drop nir_lower_to_source_mods() and related handling.
I think we're unanimous in wanting to drop nir_lower_to_source_mods.
It's a bit of complexity to handle in the backend, but perhaps more
importantly, would be even more complexity to handle in nir_search.
And, it turns out that since we made other compiler improvements in the
last few years, they no longer appear to buy us anything of value.
Summarizing the results from shader-db from this patch:
- Icelake (scalar mode)
Instruction counts:
- 411 helped, 598 hurt (out of 139,470 shaders)
- 99.2% of shaders remain unaffected. The average increase in
instruction count in hurt programs is 1.78 instructions.
- total instructions in shared programs: 17214951 -> 17215206 (<.01%)
- instructions in affected programs: 1143879 -> 1144134 (0.02%)
Cycles:
- 1042 helped, 1357 hurt
- total cycles in shared programs: 365613294 -> 365882263 (0.07%)
- cycles in affected programs: 138155497 -> 138424466 (0.19%)
- Haswell (both scalar and vector modes)
Instruction counts:
- 73 helped, 1680 hurt (out of 139,470 shaders)
- 98.7% of shaders remain unaffected. The average increase in
instruction count in hurt programs is 1.9 instructions.
- total instructions in shared programs: 14199527 -> 14202262 (0.02%)
- instructions in affected programs: 446499 -> 449234 (0.61%)
Cycles:
- 5253 helped, 5559 hurt
- total cycles in shared programs: 359996545 -> 360038731 (0.01%)
- cycles in affected programs: 155897127 -> 155939313 (0.03%)
Given that ~99% of shader-db remains unaffected, and the affected
programs are hurt by about 1-2 instructions - which are all cheap
ALU instructions - this is unlikely to be measurable in terms of
any real performance impact that would affect users.
So, drop them and simplify the backend, and hopefully enable other
future simplifications in NIR.
Reviewed-by: Eric Anholt <eric@anholt.net> [v1]
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4616>
2020-04-18 01:20:42 -07:00
|
|
|
inot_src_instr->op == nir_op_iand)) {
|
2017-02-09 15:21:47 +00:00
|
|
|
/* The sources of the source logical instruction are now the
|
|
|
|
|
* sources of the instruction that will be generated.
|
|
|
|
|
*/
|
|
|
|
|
prepare_alu_destination_and_sources(bld, inot_src_instr, op, false);
|
|
|
|
|
resolve_inot_sources(bld, inot_src_instr, op);
|
|
|
|
|
|
|
|
|
|
/* Smash all of the sources and destination to be signed. This
|
|
|
|
|
* doesn't matter for the operation of the instruction, but cmod
|
|
|
|
|
* propagation fails on unsigned sources with negation (due to
|
|
|
|
|
* fs_inst::can_do_cmod returning false).
|
|
|
|
|
*/
|
|
|
|
|
result.type =
|
|
|
|
|
brw_type_for_nir_type(devinfo,
|
|
|
|
|
(nir_alu_type)(nir_type_int |
|
|
|
|
|
nir_dest_bit_size(instr->dest.dest)));
|
|
|
|
|
op[0].type =
|
|
|
|
|
brw_type_for_nir_type(devinfo,
|
|
|
|
|
(nir_alu_type)(nir_type_int |
|
|
|
|
|
nir_src_bit_size(inot_src_instr->src[0].src)));
|
|
|
|
|
op[1].type =
|
|
|
|
|
brw_type_for_nir_type(devinfo,
|
|
|
|
|
(nir_alu_type)(nir_type_int |
|
|
|
|
|
nir_src_bit_size(inot_src_instr->src[1].src)));
|
|
|
|
|
|
|
|
|
|
/* For XOR, only invert one of the sources. Arbitrarily choose
|
|
|
|
|
* the first source.
|
|
|
|
|
*/
|
|
|
|
|
op[0].negate = !op[0].negate;
|
|
|
|
|
if (inot_src_instr->op != nir_op_ixor)
|
|
|
|
|
op[1].negate = !op[1].negate;
|
|
|
|
|
|
|
|
|
|
switch (inot_src_instr->op) {
|
|
|
|
|
case nir_op_ior:
|
|
|
|
|
bld.AND(result, op[0], op[1]);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
case nir_op_iand:
|
|
|
|
|
bld.OR(result, op[0], op[1]);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
case nir_op_ixor:
|
|
|
|
|
bld.XOR(result, op[0], op[1]);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
unreachable("impossible opcode");
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-08-10 11:52:50 -07:00
|
|
|
op[0] = resolve_source_modifiers(op[0]);
|
2015-03-05 20:39:49 -08:00
|
|
|
}
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.NOT(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_ixor:
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen >= 8) {
|
2017-02-09 15:20:04 +00:00
|
|
|
resolve_inot_sources(bld, instr, op);
|
2015-03-05 20:39:49 -08:00
|
|
|
}
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.XOR(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_ior:
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen >= 8) {
|
2017-02-09 15:20:04 +00:00
|
|
|
resolve_inot_sources(bld, instr, op);
|
2015-03-05 20:39:49 -08:00
|
|
|
}
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.OR(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_iand:
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen >= 8) {
|
2017-02-09 15:20:04 +00:00
|
|
|
resolve_inot_sources(bld, instr, op);
|
2015-03-05 20:39:49 -08:00
|
|
|
}
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.AND(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_fdot2:
|
|
|
|
|
case nir_op_fdot3:
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_fdot4:
|
2018-10-18 11:44:38 -05:00
|
|
|
case nir_op_b32all_fequal2:
|
|
|
|
|
case nir_op_b32all_iequal2:
|
|
|
|
|
case nir_op_b32all_fequal3:
|
|
|
|
|
case nir_op_b32all_iequal3:
|
|
|
|
|
case nir_op_b32all_fequal4:
|
|
|
|
|
case nir_op_b32all_iequal4:
|
|
|
|
|
case nir_op_b32any_fnequal2:
|
|
|
|
|
case nir_op_b32any_inequal2:
|
|
|
|
|
case nir_op_b32any_fnequal3:
|
|
|
|
|
case nir_op_b32any_inequal3:
|
|
|
|
|
case nir_op_b32any_fnequal4:
|
|
|
|
|
case nir_op_b32any_inequal4:
|
2014-12-23 14:44:19 -08:00
|
|
|
unreachable("Lowered by nir_lower_alu_reductions");
|
2014-08-15 10:32:07 -07:00
|
|
|
|
|
|
|
|
case nir_op_ldexp:
|
|
|
|
|
unreachable("not reached: should be handled by ldexp_to_arith()");
|
|
|
|
|
|
|
|
|
|
case nir_op_fsqrt:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_frsq:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2018-11-07 13:43:40 -06:00
|
|
|
case nir_op_i2b32:
|
|
|
|
|
case nir_op_f2b32: {
|
2018-03-07 11:12:18 +01:00
|
|
|
uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
|
|
|
|
|
if (bit_size == 64) {
|
2017-03-07 19:32:50 -08:00
|
|
|
/* two-argument instructions can't take 64-bit immediates */
|
|
|
|
|
fs_reg zero;
|
|
|
|
|
fs_reg tmp;
|
|
|
|
|
|
2018-11-07 13:43:40 -06:00
|
|
|
if (instr->op == nir_op_f2b32) {
|
2017-03-07 19:32:50 -08:00
|
|
|
zero = vgrf(glsl_type::double_type);
|
|
|
|
|
tmp = vgrf(glsl_type::double_type);
|
2017-11-02 18:30:04 -07:00
|
|
|
bld.MOV(zero, setup_imm_df(bld, 0.0));
|
2017-03-07 19:32:50 -08:00
|
|
|
} else {
|
|
|
|
|
zero = vgrf(glsl_type::int64_t_type);
|
|
|
|
|
tmp = vgrf(glsl_type::int64_t_type);
|
2017-11-02 18:30:04 -07:00
|
|
|
bld.MOV(zero, brw_imm_q(0));
|
2017-03-07 19:32:50 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* A SIMD16 execution needs to be split in two instructions, so use
|
|
|
|
|
* a vgrf instead of the flag register as dst so instruction splitting
|
|
|
|
|
* works
|
|
|
|
|
*/
|
|
|
|
|
bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
|
|
|
|
|
bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
|
2017-02-08 13:51:22 +01:00
|
|
|
} else {
|
2018-03-07 11:12:18 +01:00
|
|
|
fs_reg zero;
|
|
|
|
|
if (bit_size == 32) {
|
2018-11-07 13:43:40 -06:00
|
|
|
zero = instr->op == nir_op_f2b32 ? brw_imm_f(0.0f) : brw_imm_d(0);
|
2017-03-07 19:32:50 -08:00
|
|
|
} else {
|
2018-03-07 11:12:18 +01:00
|
|
|
assert(bit_size == 16);
|
2018-11-07 13:43:40 -06:00
|
|
|
zero = instr->op == nir_op_f2b32 ?
|
2018-03-07 11:12:18 +01:00
|
|
|
retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0);
|
2017-03-07 19:32:50 -08:00
|
|
|
}
|
2018-03-07 11:12:18 +01:00
|
|
|
bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ);
|
2017-02-08 13:51:22 +01:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2018-03-07 11:12:18 +01:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_ftrunc:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.RNDZ(result, op[0]);
|
2020-01-16 11:17:14 -08:00
|
|
|
if (devinfo->gen < 6) {
|
|
|
|
|
set_condmod(BRW_CONDITIONAL_R, inst);
|
|
|
|
|
set_predicate(BRW_PREDICATE_NORMAL,
|
|
|
|
|
bld.ADD(result, result, brw_imm_f(1.0f)));
|
|
|
|
|
inst = bld.MOV(result, result); /* for potential saturation */
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2014-12-23 14:44:19 -08:00
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_fceil: {
|
|
|
|
|
op[0].negate = !op[0].negate;
|
2014-12-23 14:44:19 -08:00
|
|
|
fs_reg temp = vgrf(glsl_type::float_type);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.RNDD(temp, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
temp.negate = true;
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.MOV(result, temp);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_ffloor:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.RNDD(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_ffract:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.FRC(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2014-12-23 14:44:19 -08:00
|
|
|
case nir_op_fround_even:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.RNDE(result, op[0]);
|
2020-01-16 11:17:14 -08:00
|
|
|
if (devinfo->gen < 6) {
|
|
|
|
|
set_condmod(BRW_CONDITIONAL_R, inst);
|
|
|
|
|
set_predicate(BRW_PREDICATE_NORMAL,
|
|
|
|
|
bld.ADD(result, result, brw_imm_f(1.0f)));
|
|
|
|
|
inst = bld.MOV(result, result); /* for potential saturation */
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2016-03-25 13:57:15 -07:00
|
|
|
case nir_op_fquantize2f16: {
|
|
|
|
|
fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
|
|
|
|
|
fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
|
|
|
|
|
fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
|
|
|
|
|
|
|
|
|
|
/* The destination stride must be at least as big as the source stride. */
|
|
|
|
|
tmp16.type = BRW_REGISTER_TYPE_W;
|
|
|
|
|
tmp16.stride = 2;
|
|
|
|
|
|
|
|
|
|
/* Check for denormal */
|
|
|
|
|
fs_reg abs_src0 = op[0];
|
|
|
|
|
abs_src0.abs = true;
|
|
|
|
|
bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
|
|
|
|
|
BRW_CONDITIONAL_L);
|
|
|
|
|
/* Get the appropriately signed zero */
|
|
|
|
|
bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
|
|
|
|
|
retype(op[0], BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(0x80000000));
|
|
|
|
|
/* Do the actual F32 -> F16 -> F32 conversion */
|
|
|
|
|
bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
|
|
|
|
|
bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
|
|
|
|
|
/* Select that or zero based on normal status */
|
|
|
|
|
inst = bld.SEL(result, zero, tmp32);
|
|
|
|
|
inst->predicate = BRW_PREDICATE_NORMAL;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_imin:
|
|
|
|
|
case nir_op_umin:
|
2015-11-10 10:18:55 +01:00
|
|
|
case nir_op_fmin:
|
2016-02-11 13:41:58 -08:00
|
|
|
inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_imax:
|
|
|
|
|
case nir_op_umax:
|
2015-11-10 10:18:55 +01:00
|
|
|
case nir_op_fmax:
|
2016-02-11 13:41:58 -08:00
|
|
|
inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_pack_snorm_2x16:
|
|
|
|
|
case nir_op_pack_snorm_4x8:
|
|
|
|
|
case nir_op_pack_unorm_2x16:
|
|
|
|
|
case nir_op_pack_unorm_4x8:
|
|
|
|
|
case nir_op_unpack_snorm_2x16:
|
|
|
|
|
case nir_op_unpack_snorm_4x8:
|
|
|
|
|
case nir_op_unpack_unorm_2x16:
|
|
|
|
|
case nir_op_unpack_unorm_4x8:
|
|
|
|
|
case nir_op_unpack_half_2x16:
|
|
|
|
|
case nir_op_pack_half_2x16:
|
|
|
|
|
unreachable("not reached: should be handled by lower_packing_builtins");
|
|
|
|
|
|
2018-07-09 10:32:10 +02:00
|
|
|
case nir_op_unpack_half_2x16_split_x_flush_to_zero:
|
|
|
|
|
assert(FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 & execution_mode);
|
2019-09-18 09:04:39 -07:00
|
|
|
/* Fall-through */
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_unpack_half_2x16_split_x:
|
2018-12-07 14:03:51 -08:00
|
|
|
inst = bld.emit(BRW_OPCODE_F16TO32, result,
|
|
|
|
|
subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2019-09-18 09:04:39 -07:00
|
|
|
|
2018-07-09 10:32:10 +02:00
|
|
|
case nir_op_unpack_half_2x16_split_y_flush_to_zero:
|
|
|
|
|
assert(FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 & execution_mode);
|
2019-09-18 09:04:39 -07:00
|
|
|
/* Fall-through */
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_unpack_half_2x16_split_y:
|
2018-12-07 14:03:51 -08:00
|
|
|
inst = bld.emit(BRW_OPCODE_F16TO32, result,
|
|
|
|
|
subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2017-02-14 22:15:16 -08:00
|
|
|
case nir_op_pack_64_2x32_split:
|
2018-04-17 10:23:47 +02:00
|
|
|
case nir_op_pack_32_2x16_split:
|
2015-08-14 12:29:31 -07:00
|
|
|
bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
2017-02-14 22:15:16 -08:00
|
|
|
case nir_op_unpack_64_2x32_split_x:
|
|
|
|
|
case nir_op_unpack_64_2x32_split_y: {
|
|
|
|
|
if (instr->op == nir_op_unpack_64_2x32_split_x)
|
2016-09-02 18:49:20 -07:00
|
|
|
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
|
|
|
|
|
else
|
|
|
|
|
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-17 10:23:47 +02:00
|
|
|
case nir_op_unpack_32_2x16_split_x:
|
|
|
|
|
case nir_op_unpack_32_2x16_split_y: {
|
|
|
|
|
if (instr->op == nir_op_unpack_32_2x16_split_x)
|
|
|
|
|
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
|
|
|
|
|
else
|
|
|
|
|
bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_fpow:
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_bitfield_reverse:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.BFREV(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_bit_count:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.CBIT(result, op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2016-06-21 15:14:03 -07:00
|
|
|
case nir_op_ufind_msb: {
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
|
|
|
|
emit_find_msb_using_lzd(bld, result, op[0], false);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-11 16:49:51 -07:00
|
|
|
case nir_op_uclz:
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) == 32);
|
|
|
|
|
bld.LZD(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2014-11-07 10:59:16 -08:00
|
|
|
case nir_op_ifind_msb: {
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2016-06-21 17:18:04 -07:00
|
|
|
if (devinfo->gen < 7) {
|
|
|
|
|
emit_find_msb_using_lzd(bld, result, op[0], true);
|
|
|
|
|
} else {
|
|
|
|
|
bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
|
2015-10-26 11:35:57 -07:00
|
|
|
|
2016-06-21 17:18:04 -07:00
|
|
|
/* FBH counts from the MSB side, while GLSL's findMSB() wants the
|
|
|
|
|
* count from the LSB side. If FBH didn't return an error
|
|
|
|
|
* (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
|
|
|
|
|
* count into an LSB count.
|
|
|
|
|
*/
|
|
|
|
|
bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
|
|
|
|
|
|
|
|
|
|
inst = bld.ADD(result, result, brw_imm_d(31));
|
|
|
|
|
inst->predicate = BRW_PREDICATE_NORMAL;
|
|
|
|
|
inst->src[0].negate = true;
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_op_find_lsb:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2016-06-22 13:12:26 -07:00
|
|
|
|
|
|
|
|
if (devinfo->gen < 7) {
|
|
|
|
|
fs_reg temp = vgrf(glsl_type::int_type);
|
|
|
|
|
|
|
|
|
|
/* (x & -x) generates a value that consists of only the LSB of x.
|
|
|
|
|
* For all powers of 2, findMSB(y) == findLSB(y).
|
|
|
|
|
*/
|
|
|
|
|
fs_reg src = retype(op[0], BRW_REGISTER_TYPE_D);
|
|
|
|
|
fs_reg negated_src = src;
|
|
|
|
|
|
|
|
|
|
/* One must be negated, and the other must be non-negated. It
|
|
|
|
|
* doesn't matter which is which.
|
|
|
|
|
*/
|
|
|
|
|
negated_src.negate = true;
|
|
|
|
|
src.negate = false;
|
|
|
|
|
|
|
|
|
|
bld.AND(temp, src, negated_src);
|
|
|
|
|
emit_find_msb_using_lzd(bld, result, temp, false);
|
|
|
|
|
} else {
|
|
|
|
|
bld.FBL(result, op[0]);
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_ubitfield_extract:
|
|
|
|
|
case nir_op_ibitfield_extract:
|
2016-01-13 11:09:11 -08:00
|
|
|
unreachable("should have been lowered");
|
|
|
|
|
case nir_op_ubfe:
|
|
|
|
|
case nir_op_ibfe:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.BFE(result, op[2], op[1], op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_bfm:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.BFI1(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_op_bfi:
|
2015-11-10 10:18:55 +01:00
|
|
|
assert(nir_dest_bit_size(instr->dest.dest) < 64);
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.BFI2(result, op[0], op[1], op[2]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_bitfield_insert:
|
2016-01-07 15:54:16 -08:00
|
|
|
unreachable("not reached: should have been lowered");
|
2014-08-15 10:32:07 -07:00
|
|
|
|
|
|
|
|
case nir_op_ishl:
|
2018-12-07 15:40:43 -08:00
|
|
|
bld.SHL(result, op[0], op[1]);
|
|
|
|
|
break;
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_ishr:
|
2018-12-07 15:40:43 -08:00
|
|
|
bld.ASR(result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
case nir_op_ushr:
|
|
|
|
|
bld.SHR(result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2019-05-30 14:14:52 -07:00
|
|
|
case nir_op_urol:
|
|
|
|
|
bld.ROL(result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
case nir_op_uror:
|
|
|
|
|
bld.ROR(result, op[0], op[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_op_pack_half_2x16_split:
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_ffma:
|
2019-02-12 16:13:59 +01:00
|
|
|
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
|
|
|
|
|
brw_rnd_mode rnd =
|
|
|
|
|
brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
|
|
|
|
|
brw_imm_d(rnd));
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.MAD(result, op[2], op[1], op[0]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_op_flrp:
|
2019-09-24 01:37:57 +03:00
|
|
|
if (nir_has_any_rounding_mode_enabled(execution_mode)) {
|
|
|
|
|
brw_rnd_mode rnd =
|
|
|
|
|
brw_rnd_mode_from_execution_mode(execution_mode);
|
|
|
|
|
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
|
|
|
|
|
brw_imm_d(rnd));
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.LRP(result, op[0], op[1], op[2]);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2018-10-18 11:44:38 -05:00
|
|
|
case nir_op_b32csel:
|
2015-02-15 13:45:04 -08:00
|
|
|
if (optimize_frontfacing_ternary(instr, result))
|
|
|
|
|
return;
|
|
|
|
|
|
2015-11-02 11:26:16 -08:00
|
|
|
bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
|
2015-06-03 20:59:26 +03:00
|
|
|
inst = bld.SEL(result, op[1], op[2]);
|
2014-12-23 14:44:19 -08:00
|
|
|
inst->predicate = BRW_PREDICATE_NORMAL;
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
|
2016-01-20 18:56:37 -08:00
|
|
|
case nir_op_extract_u8:
|
|
|
|
|
case nir_op_extract_i8: {
|
2018-10-20 09:55:28 -05:00
|
|
|
unsigned byte = nir_src_as_uint(instr->src[1].src);
|
2017-11-10 14:00:24 -08:00
|
|
|
|
|
|
|
|
/* The PRMs say:
|
|
|
|
|
*
|
|
|
|
|
* BDW+
|
|
|
|
|
* There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
|
|
|
|
|
* Use two instructions and a word or DWord intermediate integer type.
|
|
|
|
|
*/
|
|
|
|
|
if (nir_dest_bit_size(instr->dest.dest) == 64) {
|
2019-02-27 15:52:18 -08:00
|
|
|
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
|
2017-11-10 14:00:24 -08:00
|
|
|
|
|
|
|
|
if (instr->op == nir_op_extract_i8) {
|
|
|
|
|
/* If we need to sign extend, extract to a word first */
|
|
|
|
|
fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
|
2018-10-20 09:55:28 -05:00
|
|
|
bld.MOV(w_temp, subscript(op[0], type, byte));
|
2017-11-10 14:00:24 -08:00
|
|
|
bld.MOV(result, w_temp);
|
2019-02-27 15:53:55 -08:00
|
|
|
} else if (byte & 1) {
|
|
|
|
|
/* Extract the high byte from the word containing the desired byte
|
|
|
|
|
* offset.
|
|
|
|
|
*/
|
|
|
|
|
bld.SHR(result,
|
|
|
|
|
subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
|
|
|
|
|
brw_imm_uw(8));
|
2017-11-10 14:00:24 -08:00
|
|
|
} else {
|
|
|
|
|
/* Otherwise use an AND with 0xff and a word type */
|
2019-02-27 15:52:18 -08:00
|
|
|
bld.AND(result,
|
|
|
|
|
subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
|
|
|
|
|
brw_imm_uw(0xff));
|
2017-11-10 14:00:24 -08:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
|
2018-10-20 09:55:28 -05:00
|
|
|
bld.MOV(result, subscript(op[0], type, byte));
|
2017-11-10 14:00:24 -08:00
|
|
|
}
|
2016-01-20 18:56:37 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_op_extract_u16:
|
|
|
|
|
case nir_op_extract_i16: {
|
2016-05-18 18:43:54 -07:00
|
|
|
const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
|
2018-10-20 09:55:28 -05:00
|
|
|
unsigned word = nir_src_as_uint(instr->src[1].src);
|
|
|
|
|
bld.MOV(result, subscript(op[0], type, word));
|
2016-01-20 18:56:37 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
default:
|
|
|
|
|
unreachable("unhandled instruction");
|
|
|
|
|
}
|
2015-03-17 11:49:04 -07:00
|
|
|
|
|
|
|
|
/* If we need to do a boolean resolve, replace the result with -(x & 1)
|
|
|
|
|
* to sign extend the low bit to 0/~0
|
|
|
|
|
*/
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen <= 5 &&
|
2019-05-21 12:09:42 -07:00
|
|
|
!result.is_null() &&
|
2015-03-17 11:49:04 -07:00
|
|
|
(instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
|
|
|
|
|
fs_reg masked = vgrf(glsl_type::int_type);
|
2015-11-02 11:26:16 -08:00
|
|
|
bld.AND(masked, result, brw_imm_d(1));
|
2015-03-17 11:49:04 -07:00
|
|
|
masked.negate = true;
|
2015-06-03 20:59:26 +03:00
|
|
|
bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
|
2015-03-17 11:49:04 -07:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
|
2015-06-25 16:22:26 -07:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_load_const(const fs_builder &bld,
|
|
|
|
|
nir_load_const_instr *instr)
|
|
|
|
|
{
|
2015-07-29 14:16:51 -07:00
|
|
|
const brw_reg_type reg_type =
|
2017-08-24 15:54:27 +02:00
|
|
|
brw_reg_type_from_bit_size(instr->def.bit_size, BRW_REGISTER_TYPE_D);
|
2015-07-29 14:16:51 -07:00
|
|
|
fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);
|
2015-06-25 16:22:26 -07:00
|
|
|
|
2015-07-29 14:16:51 -07:00
|
|
|
switch (instr->def.bit_size) {
|
2018-07-27 13:38:39 +02:00
|
|
|
case 8:
|
|
|
|
|
for (unsigned i = 0; i < instr->def.num_components; i++)
|
2019-03-27 00:59:03 +01:00
|
|
|
bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
|
2018-07-27 13:38:39 +02:00
|
|
|
break;
|
|
|
|
|
|
2018-04-10 10:02:29 +02:00
|
|
|
case 16:
|
|
|
|
|
for (unsigned i = 0; i < instr->def.num_components; i++)
|
2019-03-27 00:59:03 +01:00
|
|
|
bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
|
2018-04-10 10:02:29 +02:00
|
|
|
break;
|
|
|
|
|
|
2015-07-29 14:16:51 -07:00
|
|
|
case 32:
|
|
|
|
|
for (unsigned i = 0; i < instr->def.num_components; i++)
|
2019-03-27 00:59:03 +01:00
|
|
|
bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
|
2015-07-29 14:16:51 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case 64:
|
2017-11-02 18:32:39 -07:00
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
if (devinfo->gen == 7) {
|
|
|
|
|
/* We don't get 64-bit integer types until gen8 */
|
|
|
|
|
for (unsigned i = 0; i < instr->def.num_components; i++) {
|
|
|
|
|
bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
|
2019-03-27 00:59:03 +01:00
|
|
|
setup_imm_df(bld, instr->value[i].f64));
|
2017-11-02 18:32:39 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (unsigned i = 0; i < instr->def.num_components; i++)
|
2019-03-27 00:59:03 +01:00
|
|
|
bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
|
2017-11-02 18:32:39 -07:00
|
|
|
}
|
2015-07-29 14:16:51 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid bit size");
|
|
|
|
|
}
|
2015-06-25 16:22:26 -07:00
|
|
|
|
|
|
|
|
nir_ssa_values[instr->def.index] = reg;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
fs_reg
fs_visitor::get_nir_src(const nir_src &src)
{
   /* Resolve a NIR source to an fs_reg, then force its register type to a
    * safe default for its bit size.
    */
   fs_reg reg;

   if (!src.is_ssa) {
      /* We don't handle indirects on locals */
      assert(src.reg.indirect == NULL);
      reg = offset(nir_locals[src.reg.reg->index], bld,
                   src.reg.base_offset * src.reg.reg->num_components);
   } else if (src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
      /* An undef source just gets a fresh, never-written VGRF. */
      const brw_reg_type reg_type =
         brw_reg_type_from_bit_size(src.ssa->bit_size, BRW_REGISTER_TYPE_D);
      reg = bld.vgrf(reg_type, src.ssa->num_components);
   } else {
      reg = nir_ssa_values[src.ssa->index];
   }

   if (nir_src_bit_size(src) == 64 && devinfo->gen == 7) {
      /* The only 64-bit type available on gen7 is DF, so use that. */
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      /* To avoid floating-point denorm flushing problems, set the type by
       * default to an integer type - instructions that need floating point
       * semantics will set this to F if they need to
       */
      reg.type = brw_reg_type_from_bit_size(nir_src_bit_size(src),
                                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}
|
|
|
|
|
|
2016-05-04 15:10:25 -07:00
|
|
|
/**
|
|
|
|
|
* Return an IMM for constants; otherwise call get_nir_src() as normal.
|
2017-08-23 17:10:33 -07:00
|
|
|
*
|
|
|
|
|
* This function should not be called on any value which may be 64 bits.
|
|
|
|
|
* We could theoretically support 64-bit on gen8+ but we choose not to
|
|
|
|
|
* because it wouldn't work in general (no gen7 support) and there are
|
|
|
|
|
* enough restrictions in 64-bit immediates that you can't take the return
|
|
|
|
|
* value and treat it the same as the result of get_nir_src().
|
2016-05-04 15:10:25 -07:00
|
|
|
*/
|
|
|
|
|
fs_reg
|
2016-05-19 14:43:23 -07:00
|
|
|
fs_visitor::get_nir_src_imm(const nir_src &src)
|
2016-05-04 15:10:25 -07:00
|
|
|
{
|
2017-08-23 17:10:33 -07:00
|
|
|
assert(nir_src_bit_size(src) == 32);
|
2018-10-20 09:55:28 -05:00
|
|
|
return nir_src_is_const(src) ?
|
|
|
|
|
fs_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(src);
|
2016-05-04 15:10:25 -07:00
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
fs_reg
fs_visitor::get_nir_dest(const nir_dest &dest)
{
   /* Resolve a NIR destination to an fs_reg, allocating a fresh VGRF for
    * SSA destinations and recording it in nir_ssa_values.
    */
   if (!dest.is_ssa) {
      /* We don't handle indirects on locals */
      assert(dest.reg.indirect == NULL);
      return offset(nir_locals[dest.reg.reg->index], bld,
                    dest.reg.base_offset * dest.reg.reg->num_components);
   }

   /* 8-bit destinations are kept in D registers; everything else defaults
    * to float.
    */
   const brw_reg_type base_type = dest.ssa.bit_size == 8 ?
      BRW_REGISTER_TYPE_D : BRW_REGISTER_TYPE_F;
   const brw_reg_type reg_type =
      brw_reg_type_from_bit_size(dest.ssa.bit_size, base_type);

   fs_reg reg = bld.vgrf(reg_type, dest.ssa.num_components);
   nir_ssa_values[dest.ssa.index] = reg;
   bld.UNDEF(reg);
   return reg;
}
|
|
|
|
|
|
|
|
|
|
void
|
2015-06-03 21:12:49 +03:00
|
|
|
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
|
|
|
|
|
unsigned wr_mask)
|
2014-08-15 10:32:07 -07:00
|
|
|
{
|
|
|
|
|
for (unsigned i = 0; i < 4; i++) {
|
|
|
|
|
if (!((wr_mask >> i) & 1))
|
|
|
|
|
continue;
|
|
|
|
|
|
2015-06-03 21:12:49 +03:00
|
|
|
fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
|
2015-06-18 12:07:27 -07:00
|
|
|
new_inst->dst = offset(new_inst->dst, bld, i);
|
2014-08-15 10:32:07 -07:00
|
|
|
for (unsigned j = 0; j < new_inst->sources; j++)
|
2015-10-26 17:09:25 -07:00
|
|
|
if (new_inst->src[j].file == VGRF)
|
2015-06-18 12:07:27 -07:00
|
|
|
new_inst->src[j] = offset(new_inst->src[j], bld, i);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2015-06-03 21:12:49 +03:00
|
|
|
bld.emit(new_inst);
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-17 14:40:03 +01:00
|
|
|
static fs_inst *
emit_pixel_interpolater_send(const fs_builder &bld,
                             enum opcode opcode,
                             const fs_reg &dst,
                             const fs_reg &src,
                             const fs_reg &desc,
                             glsl_interp_mode interpolation)
{
   /* Emit a pixel-interpolator message and mark the shader as pulling
    * barycentrics at run time.
    */
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(bld.shader->stage_prog_data);

   fs_inst *send = bld.emit(opcode, dst, src, desc);
   /* 2 floats per slot returned */
   send->size_written = 2 * dst.component_size(send->exec_size);
   send->pi_noperspective = (interpolation == INTERP_MODE_NOPERSPECTIVE);

   wm_prog_data->pulls_bary = true;

   return send;
}
|
|
|
|
|
|
2015-03-11 23:14:31 -07:00
|
|
|
/**
|
|
|
|
|
* Computes 1 << x, given a D/UD register containing some value x.
|
|
|
|
|
*/
|
|
|
|
|
static fs_reg
|
|
|
|
|
intexp2(const fs_builder &bld, const fs_reg &x)
|
|
|
|
|
{
|
|
|
|
|
assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
|
|
|
|
|
|
|
|
|
|
fs_reg result = bld.vgrf(x.type, 1);
|
|
|
|
|
fs_reg one = bld.vgrf(x.type, 1);
|
|
|
|
|
|
2015-11-02 11:26:16 -08:00
|
|
|
bld.MOV(one, retype(brw_imm_d(1), one.type));
|
2015-03-11 23:14:31 -07:00
|
|
|
bld.SHL(result, one, x);
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   /* No control data header at all means there are no bits to record, so
    * EndPrimitive() has nothing to do for this shader.
    */
   if (gs_compile->control_data_header_size_bits == 0)
      return;

   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits.  Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   /* Cut bits use one bit per vertex. */
   assert(gs_compile->control_data_bits_per_vertex == 1);

   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise. So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   const fs_builder abld = bld.annotate("end primitive");

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   /* Adding 0xffffffff (i.e. -1 in UD arithmetic) computes vertex_count - 1. */
   abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
   fs_reg mask = intexp2(abld, prev_count);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
   abld.OR(this->control_data_bits, this->control_data_bits, mask);
}
|
|
|
|
|
|
|
|
|
|
void
fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
{
   assert(stage == MESA_SHADER_GEOMETRY);
   assert(gs_compile->control_data_bits_per_vertex != 0);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   const fs_builder abld = bld.annotate("emit control data bits");
   const fs_builder fwa_bld = bld.exec_all();

   /* We use a single UD register to accumulate control data bits (32 bits
    * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
    * at a time.
    *
    * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
    * We have to select a 128-bit group via the Global and Per-Slot Offsets,
    * then use the Channel Mask phase to enable/disable which DWord within
    * that group to write. (Remember, different SIMD8 channels may have
    * emitted different numbers of vertices, so we may need per-slot offsets.)
    *
    * Channel masking presents an annoying problem: we may have to replicate
    * the data up to 4 times:
    *
    * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
    *
    * To avoid penalizing shaders that emit a small number of vertices, we
    * can avoid these sometimes: if the size of the control data header is
    * <= 128 bits, then there is only 1 OWord.  All SIMD8 channels will land
    * in the same 128-bit group, so we can skip per-slot offsets.
    *
    * Similarly, if the control data header is <= 32 bits, there is only one
    * DWord, so we can skip channel masks.
    */
   enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;

   fs_reg channel_mask, per_slot_offset;

   if (gs_compile->control_data_header_size_bits > 32) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
      channel_mask = vgrf(glsl_type::uint_type);
   }

   if (gs_compile->control_data_header_size_bits > 128) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
      per_slot_offset = vgrf(glsl_type::uint_type);
   }

   /* Figure out which DWord we're trying to write to using the formula:
    *
    *    dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *    dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
    */
   if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
      fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      /* prev_count = vertex_count - 1 (via UD wraparound add of -1) */
      abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
      unsigned log2_bits_per_vertex =
         util_last_bit(gs_compile->control_data_bits_per_vertex);
      abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));

      if (per_slot_offset.file != BAD_FILE) {
         /* Set the per-slot offset to dword_index / 4, so that we'll write to
          * the appropriate OWord within the control data header.
          */
         abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
      }

      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD.
       */
      fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
      channel_mask = intexp2(fwa_bld, channel);
      /* Then the channel masks need to be in bits 23:16. */
      fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
   }

   /* Store the control data bits in the message payload and send it.
    * The payload always starts with the URB handles (1 register); the
    * optional per-slot offset and channel mask registers follow, and the
    * remaining slots are copies of the accumulated control data bits.
    */
   unsigned mlen = 2;
   if (channel_mask.file != BAD_FILE)
      mlen += 4; /* channel masks, plus 3 extra copies of the data */
   if (per_slot_offset.file != BAD_FILE)
      mlen++;

   fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
   fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
   unsigned i = 0;
   sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
   if (per_slot_offset.file != BAD_FILE)
      sources[i++] = per_slot_offset;
   if (channel_mask.file != BAD_FILE)
      sources[i++] = channel_mask;
   while (i < mlen) {
      sources[i++] = this->control_data_bits;
   }

   abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
   fs_inst *inst = abld.emit(opcode, reg_undef, payload);
   inst->mlen = mlen;
   /* We need to increment Global Offset by 256-bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry.  Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
|
|
|
|
|
unsigned stream_id)
|
|
|
|
|
{
|
|
|
|
|
/* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
|
|
|
|
|
|
|
|
|
|
/* Note: we are calling this *before* increasing vertex_count, so
|
|
|
|
|
* this->vertex_count == vertex_count - 1 in the formula above.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* Stream mode uses 2 bits per vertex */
|
|
|
|
|
assert(gs_compile->control_data_bits_per_vertex == 2);
|
|
|
|
|
|
|
|
|
|
/* Must be a valid stream */
|
2017-07-13 16:32:54 +01:00
|
|
|
assert(stream_id < MAX_VERTEX_STREAMS);
|
2015-03-11 23:14:31 -07:00
|
|
|
|
|
|
|
|
/* Control data bits are initialized to 0 so we don't have to set any
|
|
|
|
|
* bits when sending vertices to stream 0.
|
|
|
|
|
*/
|
|
|
|
|
if (stream_id == 0)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
const fs_builder abld = bld.annotate("set stream control data bits", NULL);
|
|
|
|
|
|
|
|
|
|
/* reg::sid = stream_id */
|
|
|
|
|
fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
|
2015-11-02 11:26:16 -08:00
|
|
|
abld.MOV(sid, brw_imm_ud(stream_id));
|
2015-03-11 23:14:31 -07:00
|
|
|
|
|
|
|
|
/* reg:shift_count = 2 * (vertex_count - 1) */
|
|
|
|
|
fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
|
2015-11-02 11:26:16 -08:00
|
|
|
abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
|
2015-03-11 23:14:31 -07:00
|
|
|
|
|
|
|
|
/* Note: we're relying on the fact that the GEN SHL instruction only pays
|
|
|
|
|
* attention to the lower 5 bits of its second source argument, so on this
|
|
|
|
|
* architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
|
|
|
|
|
* stream_id << ((2 * (vertex_count - 1)) % 32).
|
|
|
|
|
*/
|
|
|
|
|
fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
|
|
|
|
|
abld.SHL(mask, sid, shift_count);
|
|
|
|
|
abld.OR(this->control_data_bits, this->control_data_bits, mask);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Emit the scalar-backend code for a single EmitVertex() /
 * EmitStreamVertex() in a geometry shader.
 *
 * Flushes the accumulated control data bits to the URB whenever a full
 * 32-bit batch has just been completed, writes the vertex's outputs to the
 * URB, and finally records the stream ID bits for this vertex when the
 * control data format is GSCTL_SID.
 *
 * \param vertex_count_nir_src  NIR source holding the number of vertices
 *                              emitted so far (before this one).
 * \param stream_id             the vertex stream this vertex belongs to.
 */
void
fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
                           unsigned stream_id)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   /* Reinterpret the vertex counter as unsigned dwords for the bit math
    * below.
    */
   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all.  Otherwise we need to
    * output them as we go.  Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (gs_compile->control_data_header_size_bits > 32) {
      const fs_builder abld =
         bld.annotate("emit vertex: emit control data bits");

      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits.  This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       *
       * TODO: If vertex_count is an immediate, we could do some of this math
       * at compile time...
       */
      fs_inst *inst =
         abld.AND(bld.null_reg_d(), vertex_count,
                  brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
      /* Set the zero flag when the low bits are all clear, i.e. when a
       * 32-bit batch boundary has just been reached.
       */
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      abld.IF(BRW_PREDICATE_NORMAL);
      /* If vertex_count is 0, then no control data bits have been
       * accumulated yet, so we can skip emitting them.
       */
      abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
               BRW_CONDITIONAL_NEQ);
      abld.IF(BRW_PREDICATE_NORMAL);
      emit_gs_control_data_bits(vertex_count);
      abld.emit(BRW_OPCODE_ENDIF);

      /* Reset control_data_bits to 0 so we can start accumulating a new
       * batch.
       *
       * Note: in the case where vertex_count == 0, this neutralizes the
       * effect of any call to EndPrimitive() that the shader may have
       * made before outputting its first vertex.
       */
      inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
      /* The reset must apply to every channel regardless of the current
       * execution mask.
       */
      inst->force_writemask_all = true;
      abld.emit(BRW_OPCODE_ENDIF);
   }

   emit_urb_writes(vertex_count);

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for GL_POINTS outputs that don't use streams).
    */
   if (gs_compile->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      set_gs_stream_control_data_bits(vertex_count, stream_id);
   }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Load \p num_components dwords of a geometry-shader per-vertex input into
 * \p dst.
 *
 * The input slot is selected by \p base_offset plus \p offset_src, reading
 * starting \p first_component components into the slot, for the input
 * vertex selected by \p vertex_src.
 *
 * When everything is constant and the data fits within the pushed URB
 * range, the value is read directly from the ATTR file; otherwise a URB
 * read message (the "pull model") is emitted, using either a constant
 * global offset or per-slot offsets.
 */
void
fs_visitor::emit_gs_input_load(const fs_reg &dst,
                               const nir_src &vertex_src,
                               unsigned base_offset,
                               const nir_src &offset_src,
                               unsigned num_components,
                               unsigned first_component)
{
   /* Only 32-bit destinations are handled here. */
   assert(type_sz(dst.type) == 4);
   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
   /* urb_read_length is in 256-bit register units; 8 dwords each. */
   const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;

   /* TODO: figure out push input layout for invocations == 1 */
   if (gs_prog_data->invocations == 1 &&
       nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
       4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
      /* Fully constant access within the pushed range: read the ATTR file
       * directly, one MOV per component.
       */
      int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
                       nir_src_as_uint(vertex_src) * push_reg_count;
      for (unsigned i = 0; i < num_components; i++) {
         bld.MOV(offset(dst, bld, i),
                 fs_reg(ATTR, imm_offset + i + first_component, dst.type));
      }
      return;
   }

   /* Resort to the pull model.  Ensure the VUE handles are provided. */
   assert(gs_prog_data->base.include_vue_handles);

   /* ICP (input control point) handles start after g0/g1 payload registers,
    * shifted one further when the primitive ID is also delivered.
    */
   unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
   fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);

   if (gs_prog_data->invocations == 1) {
      if (nir_src_is_const(vertex_src)) {
         /* The vertex index is constant; just select the proper URB handle. */
         icp_handle =
            retype(brw_vec8_grf(first_icp_handle + nir_src_as_uint(vertex_src), 0),
                   BRW_REGISTER_TYPE_UD);
      } else {
         /* The vertex index is non-constant.  We need to use indirect
          * addressing to fetch the proper URB handle.
          *
          * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
          * indicating that channel <n> should read the handle from
          * DWord <n>.  We convert that to bytes by multiplying by 4.
          *
          * Next, we convert the vertex index to bytes by multiplying
          * by 32 (shifting by 5), and add the two together.  This is
          * the final indirect byte offset.
          */
         fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
         fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
         fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
         fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);

         /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
         bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
         /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
         bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
         /* Convert vertex_index to bytes (multiply by 32) */
         bld.SHL(vertex_offset_bytes,
                 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
                 brw_imm_ud(5u));
         bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);

         /* Use first_icp_handle as the base offset.  There is one register
          * of URB handles per vertex, so inform the register allocator that
          * we might read up to nir->info.gs.vertices_in registers.
          */
         bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
                  retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
                  fs_reg(icp_offset_bytes),
                  brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
      }
   } else {
      assert(gs_prog_data->invocations > 1);

      if (nir_src_is_const(vertex_src)) {
         /* With multiple invocations the handles are packed one DWord per
          * vertex, eight per register.
          */
         unsigned vertex = nir_src_as_uint(vertex_src);
         assert(devinfo->gen >= 9 || vertex <= 5);
         bld.MOV(icp_handle,
                 retype(brw_vec1_grf(first_icp_handle + vertex / 8, vertex % 8),
                        BRW_REGISTER_TYPE_UD));
      } else {
         /* The vertex index is non-constant.  We need to use indirect
          * addressing to fetch the proper URB handle.
          *
          */
         fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);

         /* Convert vertex_index to bytes (multiply by 4) */
         bld.SHL(icp_offset_bytes,
                 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
                 brw_imm_ud(2u));

         /* Use first_icp_handle as the base offset.  There is one DWord
          * of URB handles per vertex, so inform the register allocator that
          * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
          */
         bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
                  retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
                  fs_reg(icp_offset_bytes),
                  brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
                             REG_SIZE));
      }
   }

   fs_inst *inst;
   fs_reg indirect_offset = get_nir_src(offset_src);

   if (nir_src_is_const(offset_src)) {
      /* Constant indexing - use global offset. */
      if (first_component != 0) {
         /* Reads always start at component 0, so read extra components
          * into a temporary and copy out only the requested window.
          */
         unsigned read_components = num_components + first_component;
         fs_reg tmp = bld.vgrf(dst.type, read_components);
         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
         inst->size_written = read_components *
                              tmp.component_size(inst->exec_size);
         for (unsigned i = 0; i < num_components; i++) {
            bld.MOV(offset(dst, bld, i),
                    offset(tmp, bld, i + first_component));
         }
      } else {
         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
         inst->size_written = num_components *
                              dst.component_size(inst->exec_size);
      }
      inst->offset = base_offset + nir_src_as_uint(offset_src);
      /* Message payload: just the URB handle. */
      inst->mlen = 1;
   } else {
      /* Indirect indexing - use per-slot offsets as well. */
      const fs_reg srcs[] = { icp_handle, indirect_offset };
      unsigned read_components = num_components + first_component;
      fs_reg tmp = bld.vgrf(dst.type, read_components);
      fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
      if (first_component != 0) {
         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
                         payload);
         inst->size_written = read_components *
                              tmp.component_size(inst->exec_size);
         for (unsigned i = 0; i < num_components; i++) {
            bld.MOV(offset(dst, bld, i),
                    offset(tmp, bld, i + first_component));
         }
      } else {
         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
         inst->size_written = num_components *
                              dst.component_size(inst->exec_size);
      }
      inst->offset = base_offset;
      /* Message payload: URB handle plus per-slot offsets. */
      inst->mlen = 2;
   }
}
|
|
|
|
|
|
2015-11-10 14:35:27 -08:00
|
|
|
/**
 * Return the non-constant part of an I/O intrinsic's offset source, or a
 * null fs_reg when the offset is a (necessarily zero) constant.
 */
fs_reg
fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);

   if (!nir_src_is_const(*offset_src))
      return get_nir_src(*offset_src);

   /* The only constant offset we should find is 0.  brw_nir.c's
    * add_const_offset_to_base() will fold other constant offsets
    * into instr->const_index[0].
    */
   assert(nir_src_as_uint(*offset_src) == 0);
   return fs_reg();
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
void
|
2015-11-04 23:05:07 -08:00
|
|
|
fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
|
|
|
|
|
nir_intrinsic_instr *instr)
|
2014-08-15 10:32:07 -07:00
|
|
|
{
|
2015-11-04 23:05:07 -08:00
|
|
|
assert(stage == MESA_SHADER_VERTEX);
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
switch (instr->intrinsic) {
|
|
|
|
|
case nir_intrinsic_load_vertex_id:
|
2018-04-28 14:09:22 +02:00
|
|
|
case nir_intrinsic_load_base_vertex:
|
|
|
|
|
unreachable("should be lowered by nir_lower_system_values()");
|
2015-11-04 23:05:07 -08:00
|
|
|
|
2016-07-19 19:00:19 -07:00
|
|
|
case nir_intrinsic_load_input: {
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) == 32);
|
2017-05-03 16:56:15 -07:00
|
|
|
fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
|
2019-07-19 17:38:04 -05:00
|
|
|
src = offset(src, bld, nir_intrinsic_component(instr));
|
2018-10-20 09:55:28 -05:00
|
|
|
src = offset(src, bld, nir_src_as_uint(instr->src[0]));
|
2016-07-19 19:00:19 -07:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
for (unsigned i = 0; i < instr->num_components; i++)
|
|
|
|
|
bld.MOV(offset(dest, bld, i), offset(src, bld, i));
|
2016-07-19 19:00:19 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-18 15:45:46 -07:00
|
|
|
case nir_intrinsic_load_vertex_id_zero_base:
|
|
|
|
|
case nir_intrinsic_load_instance_id:
|
|
|
|
|
case nir_intrinsic_load_base_instance:
|
|
|
|
|
case nir_intrinsic_load_draw_id:
|
2018-01-25 19:15:40 +01:00
|
|
|
case nir_intrinsic_load_first_vertex:
|
2018-04-28 14:09:20 +02:00
|
|
|
case nir_intrinsic_load_is_indexed_draw:
|
2018-01-25 19:15:40 +01:00
|
|
|
unreachable("lowered by brw_nir_lower_vs_inputs");
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
default:
|
|
|
|
|
nir_emit_intrinsic(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-03 14:37:11 -07:00
|
|
|
/**
 * Return the URB handle for the input control point referenced by a TCS
 * per-vertex intrinsic, for SINGLE_PATCH dispatch mode.
 *
 * In this mode the thread payload packs the ICP handles as DWords starting
 * at g1 (one DWord per vertex).  The vertex index is instr->src[0].
 */
fs_reg
fs_visitor::get_tcs_single_patch_icp_handle(const fs_builder &bld,
                                            nir_intrinsic_instr *instr)
{
   struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
   const nir_src &vertex_src = instr->src[0];
   /* Non-NULL when the vertex index itself comes from an intrinsic
    * (used below to special-case gl_InvocationID).
    */
   nir_intrinsic_instr *vertex_intrin = nir_src_as_intrinsic(vertex_src);
   fs_reg icp_handle;

   if (nir_src_is_const(vertex_src)) {
      /* Emit a MOV to resolve <0,1,0> regioning. */
      icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      unsigned vertex = nir_src_as_uint(vertex_src);
      /* Handle `vertex` is DWord (vertex & 7) of register g(1 + vertex/8). */
      bld.MOV(icp_handle,
              retype(brw_vec1_grf(1 + (vertex >> 3), vertex & 7),
                     BRW_REGISTER_TYPE_UD));
   } else if (tcs_prog_data->instances == 1 && vertex_intrin &&
              vertex_intrin->intrinsic == nir_intrinsic_load_invocation_id) {
      /* For the common case of only 1 instance, an array index of
       * gl_InvocationID means reading g1.  Skip all the indirect work.
       */
      icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
   } else {
      /* The vertex index is non-constant.  We need to use indirect
       * addressing to fetch the proper URB handle.
       */
      icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);

      /* Each ICP handle is a single DWord (4 bytes) */
      fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      bld.SHL(vertex_offset_bytes,
              retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(2u));

      /* Start at g1.  We might read up to 4 registers. */
      bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
               retype(brw_vec8_grf(1, 0), icp_handle.type), vertex_offset_bytes,
               brw_imm_ud(4 * REG_SIZE));
   }

   return icp_handle;
}
|
|
|
|
|
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
/**
 * Return the URB handle for the input control point referenced by a TCS
 * per-vertex intrinsic, for 8_PATCH dispatch mode.
 *
 * In this mode the thread payload carries one full register of ICP handles
 * per vertex (one handle per patch/channel), starting after the
 * patch/primitive-ID registers.  The vertex index is instr->src[0].
 */
fs_reg
fs_visitor::get_tcs_eight_patch_icp_handle(const fs_builder &bld,
                                           nir_intrinsic_instr *instr)
{
   struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
   struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
   const nir_src &vertex_src = instr->src[0];

   /* Handles start one register later when the primitive ID is pushed. */
   unsigned first_icp_handle = tcs_prog_data->include_primitive_id ? 3 : 2;

   if (nir_src_is_const(vertex_src)) {
      /* Constant vertex index: the handle register can be selected
       * directly, no indirect addressing needed.
       */
      return fs_reg(retype(brw_vec8_grf(first_icp_handle +
                                        nir_src_as_uint(vertex_src), 0),
                           BRW_REGISTER_TYPE_UD));
   }

   /* The vertex index is non-constant.  We need to use indirect
    * addressing to fetch the proper URB handle.
    *
    * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
    * indicating that channel <n> should read the handle from
    * DWord <n>.  We convert that to bytes by multiplying by 4.
    *
    * Next, we convert the vertex index to bytes by multiplying
    * by 32 (shifting by 5), and add the two together.  This is
    * the final indirect byte offset.
    */
   fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
   fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);

   /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
   bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
   /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
   bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
   /* Convert vertex_index to bytes (multiply by 32) */
   bld.SHL(vertex_offset_bytes,
           retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(5u));
   bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);

   /* Use first_icp_handle as the base offset.  There is one register
    * of URB handles per vertex, so inform the register allocator that
    * we might read up to tcs_key->input_vertices registers (the read
    * range below uses the TCS patch size, not the GS vertices_in count).
    */
   bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
            retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
            icp_offset_bytes, brw_imm_ud(tcs_key->input_vertices * REG_SIZE));

   return icp_handle;
}
|
|
|
|
|
|
|
|
|
|
struct brw_reg
|
|
|
|
|
fs_visitor::get_tcs_output_urb_handle()
|
|
|
|
|
{
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
|
|
|
|
|
|
|
|
|
|
if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
|
|
|
|
|
return retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
|
|
|
|
|
} else {
|
|
|
|
|
assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
|
|
|
|
|
return retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-14 17:40:43 -08:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
|
|
|
|
|
nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
assert(stage == MESA_SHADER_TESS_CTRL);
|
|
|
|
|
struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
|
2016-09-08 23:48:51 -07:00
|
|
|
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
|
|
|
|
|
|
|
|
|
|
bool eight_patch =
|
|
|
|
|
vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH;
|
2015-11-14 17:40:43 -08:00
|
|
|
|
|
|
|
|
fs_reg dst;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dst = get_nir_dest(instr->dest);
|
|
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
|
|
|
|
case nir_intrinsic_load_primitive_id:
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
bld.MOV(dst, fs_reg(eight_patch ? brw_vec8_grf(2, 0)
|
|
|
|
|
: brw_vec1_grf(0, 1)));
|
2015-11-14 17:40:43 -08:00
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_invocation_id:
|
|
|
|
|
bld.MOV(retype(dst, invocation_id.type), invocation_id);
|
|
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_patch_vertices_in:
|
|
|
|
|
bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
|
|
|
|
|
brw_imm_d(tcs_key->input_vertices));
|
|
|
|
|
break;
|
|
|
|
|
|
2020-01-07 14:54:26 -06:00
|
|
|
case nir_intrinsic_control_barrier: {
|
2015-11-14 17:40:43 -08:00
|
|
|
if (tcs_prog_data->instances == 1)
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
|
i965: Fix execution size of scalar TCS barrier setup code.
Previously, the scalar TCS backend was generating:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(8) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all 1Q };
shl(8) g17.2<1>UD g17.2<8,8,1>UD 0x0000000bUD { align1 WE_all 1Q };
or(8) g17.2<1>UD g17.2<8,8,1>UD 0x00008200UD { align1 WE_all 1Q };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
This is rubbish - g17.2<8,8,1>UD spans two registers, and is an illegal
region. Not to mention it clobbers 8 channels of data when we only
wanted to touch m0.2.
Instead, we want:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(1) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all };
shl(1) g17.2<1>UD g17.2<0,1,0>UD 0x0000000bUD { align1 WE_all };
or(1) g17.2<1>UD g17.2<0,1,0>UD 0x00008200UD { align1 WE_all };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
Using component() accomplishes this.
Fixes GL44-CTS.tessellation_shader.tessellation_shader_tc_barriers.
barrier_guarded_read_write_calls on Skylake. Probably fixes other
barrier issues on Gen8+.
v2: Use a group(1, 0) builder so inst->exec_size is set correctly
(thanks to Francisco Jerez for catching that it was incorrect).
Cc: mesa-stable@lists.freedesktop.org
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com> [v1]
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
2016-08-17 06:26:01 -07:00
|
|
|
fs_reg m0_2 = component(m0, 2);
|
2015-11-14 17:40:43 -08:00
|
|
|
|
i965: Fix execution size of scalar TCS barrier setup code.
Previously, the scalar TCS backend was generating:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(8) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all 1Q };
shl(8) g17.2<1>UD g17.2<8,8,1>UD 0x0000000bUD { align1 WE_all 1Q };
or(8) g17.2<1>UD g17.2<8,8,1>UD 0x00008200UD { align1 WE_all 1Q };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
This is rubbish - g17.2<8,8,1>UD spans two registers, and is an illegal
region. Not to mention it clobbers 8 channels of data when we only
wanted to touch m0.2.
Instead, we want:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(1) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all };
shl(1) g17.2<1>UD g17.2<0,1,0>UD 0x0000000bUD { align1 WE_all };
or(1) g17.2<1>UD g17.2<0,1,0>UD 0x00008200UD { align1 WE_all };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
Using component() accomplishes this.
Fixes GL44-CTS.tessellation_shader.tessellation_shader_tc_barriers.
barrier_guarded_read_write_calls on Skylake. Probably fixes other
barrier issues on Gen8+.
v2: Use a group(1, 0) builder so inst->exec_size is set correctly
(thanks to Francisco Jerez for catching that it was incorrect).
Cc: mesa-stable@lists.freedesktop.org
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com> [v1]
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
2016-08-17 06:26:01 -07:00
|
|
|
const fs_builder chanbld = bld.exec_all().group(1, 0);
|
2015-11-14 17:40:43 -08:00
|
|
|
|
|
|
|
|
/* Zero the message header */
|
i965: Fix execution size of scalar TCS barrier setup code.
Previously, the scalar TCS backend was generating:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(8) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all 1Q };
shl(8) g17.2<1>UD g17.2<8,8,1>UD 0x0000000bUD { align1 WE_all 1Q };
or(8) g17.2<1>UD g17.2<8,8,1>UD 0x00008200UD { align1 WE_all 1Q };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
This is rubbish - g17.2<8,8,1>UD spans two registers, and is an illegal
region. Not to mention it clobbers 8 channels of data when we only
wanted to touch m0.2.
Instead, we want:
mov(8) g17<1>UD 0x00000000UD { align1 WE_all 1Q compacted };
and(1) g17.2<1>UD g0.2<0,1,0>UD 0x0001e000UD { align1 WE_all };
shl(1) g17.2<1>UD g17.2<0,1,0>UD 0x0000000bUD { align1 WE_all };
or(1) g17.2<1>UD g17.2<0,1,0>UD 0x00008200UD { align1 WE_all };
send(8) null<1>UW g17<8,8,1>UD
gateway (barrier msg) mlen 1 rlen 0 { align1 WE_all 1Q };
Using component() accomplishes this.
Fixes GL44-CTS.tessellation_shader.tessellation_shader_tc_barriers.
barrier_guarded_read_write_calls on Skylake. Probably fixes other
barrier issues on Gen8+.
v2: Use a group(1, 0) builder so inst->exec_size is set correctly
(thanks to Francisco Jerez for catching that it was incorrect).
Cc: mesa-stable@lists.freedesktop.org
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com> [v1]
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
2016-08-17 06:26:01 -07:00
|
|
|
bld.exec_all().MOV(m0, brw_imm_ud(0u));
|
2015-11-14 17:40:43 -08:00
|
|
|
|
2019-03-27 09:38:15 -07:00
|
|
|
if (devinfo->gen < 11) {
|
|
|
|
|
/* Copy "Barrier ID" from r0.2, bits 16:13 */
|
|
|
|
|
chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(INTEL_MASK(16, 13)));
|
2015-11-14 17:40:43 -08:00
|
|
|
|
2019-03-27 09:38:15 -07:00
|
|
|
/* Shift it up to bits 27:24. */
|
|
|
|
|
chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
|
|
|
|
|
} else {
|
|
|
|
|
chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(INTEL_MASK(30, 24)));
|
|
|
|
|
}
|
2015-11-14 17:40:43 -08:00
|
|
|
|
|
|
|
|
/* Set the Barrier Count and the enable bit */
|
2019-03-27 09:38:15 -07:00
|
|
|
if (devinfo->gen < 11) {
|
|
|
|
|
chanbld.OR(m0_2, m0_2,
|
|
|
|
|
brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
|
|
|
|
|
} else {
|
|
|
|
|
chanbld.OR(m0_2, m0_2,
|
|
|
|
|
brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
|
|
|
|
|
}
|
2015-11-14 17:40:43 -08:00
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_input:
|
|
|
|
|
unreachable("nir_lower_io should never give us these.");
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_per_vertex_input: {
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) == 32);
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg indirect_offset = get_indirect_offset(instr);
|
|
|
|
|
unsigned imm_offset = instr->const_index[0];
|
|
|
|
|
fs_inst *inst;
|
|
|
|
|
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
fs_reg icp_handle =
|
|
|
|
|
eight_patch ? get_tcs_eight_patch_icp_handle(bld, instr)
|
|
|
|
|
: get_tcs_single_patch_icp_handle(bld, instr);
|
2015-11-14 17:40:43 -08:00
|
|
|
|
2016-05-09 10:31:50 +02:00
|
|
|
/* We can only read two double components with each URB read, so
|
|
|
|
|
* we send two read messages in that case, each one loading up to
|
|
|
|
|
* two double components.
|
|
|
|
|
*/
|
|
|
|
|
unsigned num_components = instr->num_components;
|
2016-06-10 21:57:49 +10:00
|
|
|
unsigned first_component = nir_intrinsic_component(instr);
|
2016-05-09 10:31:50 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
if (indirect_offset.file == BAD_FILE) {
|
|
|
|
|
/* Constant indexing - use global offset. */
|
|
|
|
|
if (first_component != 0) {
|
|
|
|
|
unsigned read_components = num_components + first_component;
|
|
|
|
|
fs_reg tmp = bld.vgrf(dst.type, read_components);
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
|
|
|
|
|
for (unsigned i = 0; i < num_components; i++) {
|
|
|
|
|
bld.MOV(offset(dst, bld, i),
|
|
|
|
|
offset(tmp, bld, i + first_component));
|
2016-05-19 16:58:48 +10:00
|
|
|
}
|
2016-05-09 10:31:50 +02:00
|
|
|
} else {
|
2019-07-19 17:38:04 -05:00
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
|
2016-05-09 10:31:50 +02:00
|
|
|
}
|
2019-07-19 17:38:04 -05:00
|
|
|
inst->offset = imm_offset;
|
|
|
|
|
inst->mlen = 1;
|
|
|
|
|
} else {
|
|
|
|
|
/* Indirect indexing - use per-slot offsets as well. */
|
|
|
|
|
const fs_reg srcs[] = { icp_handle, indirect_offset };
|
|
|
|
|
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
|
|
|
|
|
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
|
|
|
|
|
if (first_component != 0) {
|
|
|
|
|
unsigned read_components = num_components + first_component;
|
|
|
|
|
fs_reg tmp = bld.vgrf(dst.type, read_components);
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
|
|
|
|
|
payload);
|
|
|
|
|
for (unsigned i = 0; i < num_components; i++) {
|
|
|
|
|
bld.MOV(offset(dst, bld, i),
|
|
|
|
|
offset(tmp, bld, i + first_component));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
|
|
|
|
|
payload);
|
2016-05-09 10:31:50 +02:00
|
|
|
}
|
2019-07-19 17:38:04 -05:00
|
|
|
inst->offset = imm_offset;
|
|
|
|
|
inst->mlen = 2;
|
|
|
|
|
}
|
|
|
|
|
inst->size_written = (num_components + first_component) *
|
|
|
|
|
inst->dst.component_size(inst->exec_size);
|
2016-05-09 10:31:50 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
/* Copy the temporary to the destination to deal with writemasking.
|
|
|
|
|
*
|
|
|
|
|
* Also attempt to deal with gl_PointSize being in the .w component.
|
|
|
|
|
*/
|
|
|
|
|
if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
|
|
|
|
|
assert(type_sz(dst.type) == 4);
|
|
|
|
|
inst->dst = bld.vgrf(dst.type, 4);
|
|
|
|
|
inst->size_written = 4 * REG_SIZE;
|
|
|
|
|
bld.MOV(dst, offset(inst->dst, bld, 3));
|
2015-11-14 17:40:43 -08:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_output:
|
|
|
|
|
case nir_intrinsic_load_per_vertex_output: {
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) == 32);
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg indirect_offset = get_indirect_offset(instr);
|
|
|
|
|
unsigned imm_offset = instr->const_index[0];
|
2016-06-15 12:35:49 +10:00
|
|
|
unsigned first_component = nir_intrinsic_component(instr);
|
2015-11-14 17:40:43 -08:00
|
|
|
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
struct brw_reg output_handles = get_tcs_output_urb_handle();
|
|
|
|
|
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_inst *inst;
|
|
|
|
|
if (indirect_offset.file == BAD_FILE) {
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
/* This MOV replicates the output handle to all enabled channels
|
|
|
|
|
* is SINGLE_PATCH mode.
|
|
|
|
|
*/
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
bld.MOV(patch_handle, output_handles);
|
2015-11-14 17:40:43 -08:00
|
|
|
|
2016-11-24 01:50:10 -08:00
|
|
|
{
|
2016-06-15 12:35:49 +10:00
|
|
|
if (first_component != 0) {
|
|
|
|
|
unsigned read_components =
|
|
|
|
|
instr->num_components + first_component;
|
|
|
|
|
fs_reg tmp = bld.vgrf(dst.type, read_components);
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
|
|
|
|
|
patch_handle);
|
2016-09-07 13:38:20 -07:00
|
|
|
inst->size_written = read_components * REG_SIZE;
|
2016-06-15 12:35:49 +10:00
|
|
|
for (unsigned i = 0; i < instr->num_components; i++) {
|
|
|
|
|
bld.MOV(offset(dst, bld, i),
|
|
|
|
|
offset(tmp, bld, i + first_component));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
|
|
|
|
|
patch_handle);
|
2016-09-07 13:38:20 -07:00
|
|
|
inst->size_written = instr->num_components * REG_SIZE;
|
2016-06-15 12:35:49 +10:00
|
|
|
}
|
2015-11-14 17:40:43 -08:00
|
|
|
inst->offset = imm_offset;
|
|
|
|
|
inst->mlen = 1;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* Indirect indexing - use per-slot offsets as well. */
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
const fs_reg srcs[] = { output_handles, indirect_offset };
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
|
|
|
|
|
bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
|
2016-06-15 12:35:49 +10:00
|
|
|
if (first_component != 0) {
|
|
|
|
|
unsigned read_components =
|
|
|
|
|
instr->num_components + first_component;
|
|
|
|
|
fs_reg tmp = bld.vgrf(dst.type, read_components);
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
|
|
|
|
|
payload);
|
2016-09-07 13:38:20 -07:00
|
|
|
inst->size_written = read_components * REG_SIZE;
|
2016-06-15 12:35:49 +10:00
|
|
|
for (unsigned i = 0; i < instr->num_components; i++) {
|
|
|
|
|
bld.MOV(offset(dst, bld, i),
|
|
|
|
|
offset(tmp, bld, i + first_component));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
|
|
|
|
|
payload);
|
2016-09-07 13:38:20 -07:00
|
|
|
inst->size_written = instr->num_components * REG_SIZE;
|
2016-06-15 12:35:49 +10:00
|
|
|
}
|
2015-11-14 17:40:43 -08:00
|
|
|
inst->offset = imm_offset;
|
|
|
|
|
inst->mlen = 2;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_store_output:
|
|
|
|
|
case nir_intrinsic_store_per_vertex_output: {
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) == 32);
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
fs_reg indirect_offset = get_indirect_offset(instr);
|
|
|
|
|
unsigned imm_offset = instr->const_index[0];
|
|
|
|
|
unsigned mask = instr->const_index[1];
|
|
|
|
|
unsigned header_regs = 0;
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
struct brw_reg output_handles = get_tcs_output_urb_handle();
|
|
|
|
|
|
2015-11-14 17:40:43 -08:00
|
|
|
fs_reg srcs[7];
|
intel/compiler: Implement TCS 8_PATCH mode and INTEL_DEBUG=tcs8
Our tessellation control shaders can be dispatched in several modes.
- SINGLE_PATCH (Gen7+) processes a single patch per thread, with each
channel corresponding to a different patch vertex. PATCHLIST_N will
launch (N / 8) threads. If N is less than 8, some channels will be
disabled, leaving some untapped hardware capabilities. Conditionals
based on gl_InvocationID are non-uniform, which means that they'll
often have to execute both paths. However, if there are fewer than
8 vertices, all invocations will happen within a single thread, so
barriers can become no-ops, which is nice. We also burn a maximum
of 4 registers for ICP handles, so we can compile without regard for
the value of N. It also works in all cases.
- DUAL_PATCH mode processes up to two patches at a time, where the first
four channels come from patch 1, and the second group of four come
from patch 2. This tries to provide better EU utilization for small
patches (N <= 4). It cannot be used in all cases.
- 8_PATCH mode processes 8 patches at a time, with a thread launched per
vertex in the patch. Each channel corresponds to the same vertex, but
in each of the 8 patches. This utilizes all channels even for small
patches. It also makes conditions on gl_InvocationID uniform, leading
to proper jumps. Barriers, unfortunately, become real. Worse, for
PATCHLIST_N, the thread payload burns N registers for ICP handles.
This can burn up to 32 registers, or 1/4 of our register file, for
URB handles. For Vulkan (and DX), we know the number of vertices at
compile time, so we can limit the amount of waste. In GL, the patch
dimension is dynamic state, so we either would have to waste all 32
(not reasonable) or guess (badly) and recompile. This is unfortunate.
Because we can only spawn 16 thread instances, we can only use this
mode for PATCHLIST_16 and smaller. The rest must use SINGLE_PATCH.
This patch implements the new 8_PATCH TCS mode, but leaves us using
SINGLE_PATCH by default. A new INTEL_DEBUG=tcs8 flag will switch to
using 8_PATCH mode for testing and benchmarking purposes. We may
want to consider using 8_PATCH mode in Vulkan in some cases.
The data I've seen shows that 8_PATCH mode can be more efficient in
some cases, but SINGLE_PATCH mode (the one we use today) is faster
in other cases. Ultimately, the TES matters much more than the TCS
for performance, so the decision may not matter much.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-05-03 14:57:54 -07:00
|
|
|
srcs[header_regs++] = output_handles;
|
2015-11-14 17:40:43 -08:00
|
|
|
|
|
|
|
|
if (indirect_offset.file != BAD_FILE) {
|
|
|
|
|
srcs[header_regs++] = indirect_offset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (mask == 0)
|
|
|
|
|
break;
|
|
|
|
|
|
2016-08-02 08:46:04 +02:00
|
|
|
unsigned num_components = util_last_bit(mask);
|
2015-11-14 17:40:43 -08:00
|
|
|
enum opcode opcode;
|
|
|
|
|
|
2016-05-09 15:23:34 +02:00
|
|
|
/* We can only pack two 64-bit components in a single message, so send
|
|
|
|
|
* 2 messages if we have more components
|
|
|
|
|
*/
|
2016-06-10 21:57:49 +10:00
|
|
|
unsigned first_component = nir_intrinsic_component(instr);
|
2016-05-20 10:29:06 +10:00
|
|
|
mask = mask << first_component;
|
|
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
if (mask != WRITEMASK_XYZW) {
|
|
|
|
|
srcs[header_regs++] = brw_imm_ud(mask << 16);
|
|
|
|
|
opcode = indirect_offset.file != BAD_FILE ?
|
|
|
|
|
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
|
|
|
|
|
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
|
|
|
|
|
} else {
|
|
|
|
|
opcode = indirect_offset.file != BAD_FILE ?
|
|
|
|
|
SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
|
|
|
|
|
SHADER_OPCODE_URB_WRITE_SIMD8;
|
|
|
|
|
}
|
2016-05-09 15:23:34 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
for (unsigned i = 0; i < num_components; i++) {
|
|
|
|
|
if (!(mask & (1 << (i + first_component))))
|
|
|
|
|
continue;
|
2016-05-09 15:23:34 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
srcs[header_regs + i + first_component] = offset(value, bld, i);
|
|
|
|
|
}
|
2016-05-09 15:23:34 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
unsigned mlen = header_regs + num_components + first_component;
|
|
|
|
|
fs_reg payload =
|
|
|
|
|
bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
|
|
|
|
|
bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
|
2016-05-09 15:23:34 +02:00
|
|
|
|
2019-07-19 17:38:04 -05:00
|
|
|
fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
|
|
|
|
|
inst->offset = imm_offset;
|
|
|
|
|
inst->mlen = mlen;
|
2015-11-14 17:40:43 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
nir_emit_intrinsic(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-10 14:35:27 -08:00
|
|
|
/**
 * Emit FS IR for a NIR intrinsic in a tessellation evaluation shader.
 *
 * Handles the TES-specific intrinsics (primitive ID, tess coord, and input
 * loads from the patch URB); everything else is forwarded to the generic
 * nir_emit_intrinsic() handler.
 */
void
fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
                                   nir_intrinsic_instr *instr)
{
   assert(stage == MESA_SHADER_TESS_EVAL);
   struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);

   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   switch (instr->intrinsic) {
   case nir_intrinsic_load_primitive_id:
      /* The primitive ID is delivered in the thread payload at g0.1. */
      bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
      break;
   case nir_intrinsic_load_tess_coord:
      /* gl_TessCoord is part of the payload in g1-3 */
      for (unsigned i = 0; i < 3; i++) {
         bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
      }
      break;

   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input: {
      /* This path only handles 32-bit input loads. */
      assert(nir_dest_bit_size(instr->dest) == 32);
      fs_reg indirect_offset = get_indirect_offset(instr);
      unsigned imm_offset = instr->const_index[0];
      unsigned first_component = nir_intrinsic_component(instr);

      fs_inst *inst;
      if (indirect_offset.file == BAD_FILE) {
         /* Constant offset: inputs below the push limit live in ATTR
          * registers; anything beyond that falls back to a URB read
          * through the patch handle.
          */
         /* Arbitrarily only push up to 32 vec4 slots worth of data,
          * which is 16 registers (since each holds 2 vec4 slots).
          */
         const unsigned max_push_slots = 32;
         if (imm_offset < max_push_slots) {
            /* Pushed input: each ATTR register packs two vec4 slots, so
             * slot N component C lives at ATTR reg N/2, channel
             * 4*(N%2) + C.
             */
            fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
            for (int i = 0; i < instr->num_components; i++) {
               unsigned comp = 4 * (imm_offset % 2) + i + first_component;
               bld.MOV(offset(dest, bld, i), component(src, comp));
            }

            /* Record how many URB registers must be pushed; keep the
             * running maximum across all loads seen so far.
             */
            tes_prog_data->base.urb_read_length =
               MAX2(tes_prog_data->base.urb_read_length,
                    (imm_offset / 2) + 1);
         } else {
            /* Replicate the patch handle to all enabled channels */
            const fs_reg srcs[] = {
               retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
            };
            fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);

            if (first_component != 0) {
               /* Reading starts mid-vec4: read the extra leading
                * components into a temporary, then shift them down into
                * the destination.
                */
               unsigned read_components =
                  instr->num_components + first_component;
               fs_reg tmp = bld.vgrf(dest.type, read_components);
               inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
                               patch_handle);
               inst->size_written = read_components * REG_SIZE;
               for (unsigned i = 0; i < instr->num_components; i++) {
                  bld.MOV(offset(dest, bld, i),
                          offset(tmp, bld, i + first_component));
               }
            } else {
               inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest,
                               patch_handle);
               inst->size_written = instr->num_components * REG_SIZE;
            }
            inst->mlen = 1;
            inst->offset = imm_offset;
         }
      } else {
         /* Indirect indexing - use per-slot offsets as well. */

         /* We can only read two double components with each URB read, so
          * we send two read messages in that case, each one loading up to
          * two double components.
          */
         unsigned num_components = instr->num_components;
         /* Payload: patch handle in g0.0 plus the per-channel slot
          * offsets.
          */
         const fs_reg srcs[] = {
            retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
            indirect_offset
         };
         fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
         bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);

         if (first_component != 0) {
            /* As in the constant-offset pull path: over-read into a
             * temporary and copy the wanted components down.
             */
            unsigned read_components =
                num_components + first_component;
            fs_reg tmp = bld.vgrf(dest.type, read_components);
            inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
                            payload);
            for (unsigned i = 0; i < num_components; i++) {
               bld.MOV(offset(dest, bld, i),
                       offset(tmp, bld, i + first_component));
            }
         } else {
            inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest,
                            payload);
         }
         inst->mlen = 2;
         inst->offset = imm_offset;
         inst->size_written = (num_components + first_component) *
                              inst->dst.component_size(inst->exec_size);
      }
      break;
   }
   default:
      nir_emit_intrinsic(bld, instr);
      break;
   }
}
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
|
|
|
|
|
nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
assert(stage == MESA_SHADER_GEOMETRY);
|
2015-11-07 18:58:59 -08:00
|
|
|
fs_reg indirect_offset;
|
2015-11-04 23:05:07 -08:00
|
|
|
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
|
|
|
|
case nir_intrinsic_load_primitive_id:
|
|
|
|
|
assert(stage == MESA_SHADER_GEOMETRY);
|
2016-09-08 23:48:51 -07:00
|
|
|
assert(brw_gs_prog_data(prog_data)->include_primitive_id);
|
2015-11-04 23:05:07 -08:00
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
|
|
|
|
|
retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_input:
|
|
|
|
|
unreachable("load_input intrinsics are invalid for the GS stage");
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_per_vertex_input:
|
2015-11-25 14:14:05 -08:00
|
|
|
emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
|
2016-05-19 15:58:51 +10:00
|
|
|
instr->src[1], instr->num_components,
|
|
|
|
|
nir_intrinsic_component(instr));
|
2015-11-04 23:05:07 -08:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_emit_vertex_with_counter:
|
|
|
|
|
emit_gs_vertex(instr->src[0], instr->const_index[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_end_primitive_with_counter:
|
|
|
|
|
emit_gs_end_primitive(instr->src[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_set_vertex_count:
|
|
|
|
|
bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_invocation_id: {
|
|
|
|
|
fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
|
|
|
|
|
assert(val.file != BAD_FILE);
|
|
|
|
|
dest.type = val.type;
|
|
|
|
|
bld.MOV(dest, val);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
nir_emit_intrinsic(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-21 20:25:28 -07:00
|
|
|
/**
|
|
|
|
|
* Fetch the current render target layer index.
|
|
|
|
|
*/
|
|
|
|
|
/**
 * Fetch the current render target layer index.
 *
 * Returns a register holding the layer index for layered rendering, or an
 * immediate zero on hardware generations that cannot render into layers
 * other than the first.
 */
static fs_reg
fetch_render_target_array_index(const fs_builder &bld)
{
   const struct gen_device_info *devinfo = bld.shader->devinfo;

   if (devinfo->gen >= 12) {
      /* The render target array index is provided in the thread payload as
       * bits 26:16 of r1.1.
       */
      const fs_reg layer = bld.vgrf(BRW_REGISTER_TYPE_UD);
      bld.AND(layer, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 1, 3),
              brw_imm_uw(0x7ff));
      return layer;
   }

   if (devinfo->gen >= 6) {
      /* The render target array index is provided in the thread payload as
       * bits 26:16 of r0.0.
       */
      const fs_reg layer = bld.vgrf(BRW_REGISTER_TYPE_UD);
      bld.AND(layer, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 0, 1),
              brw_imm_uw(0x7ff));
      return layer;
   }

   /* Pre-SNB we only ever render into the first layer of the framebuffer
    * since layered rendering is not implemented.
    */
   return brw_imm_ud(0);
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Fake non-coherent framebuffer read implemented using TXF to fetch from the
|
|
|
|
|
* framebuffer at the current fragment coordinates and sample index.
|
|
|
|
|
*/
|
|
|
|
|
/**
 * Fake non-coherent framebuffer read implemented using TXF to fetch from the
 * framebuffer at the current fragment coordinates and sample index.
 *
 * \param bld    Builder used to emit the fetch sequence.
 * \param dst    Destination register receiving four components.
 * \param target Render target index being read back.
 * \return       The emitted texel-fetch instruction.
 */
fs_inst *
fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst,
                                      unsigned target)
{
   const struct gen_device_info *devinfo = bld.shader->devinfo;

   /* This path is only taken when the coherent render-target-read message
    * is unavailable or disabled in the key.
    */
   assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
   const brw_wm_prog_key *wm_key =
      reinterpret_cast<const brw_wm_prog_key *>(key);
   assert(!wm_key->coherent_fb_fetch);
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(stage_prog_data);

   /* Calculate the surface index relative to the start of the texture binding
    * table block, since that's what the texturing messages expect.
    */
   const unsigned surface = target +
      wm_prog_data->binding_table.render_target_read_start -
      wm_prog_data->base.binding_table.texture_start;

   /* Calculate the fragment coordinates. */
   const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
   bld.MOV(offset(coords, bld, 0), pixel_x);
   bld.MOV(offset(coords, bld, 1), pixel_y);
   bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));

   /* Calculate the sample index and MCS payload when multisampling.  Luckily
    * the MCS fetch message behaves deterministically for UMS surfaces, so it
    * shouldn't be necessary to recompile based on whether the framebuffer is
    * CMS or UMS.
    */
   if (wm_key->multisample_fbo &&
       nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
      nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();

   const fs_reg sample = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
   const fs_reg mcs = wm_key->multisample_fbo ?
      emit_mcs_fetch(coords, 3, brw_imm_ud(surface), fs_reg()) : fs_reg();

   /* Use either a normal or a CMS texel fetch message depending on whether
    * the framebuffer is single or multisample.  On SKL+ use the wide CMS
    * message just in case the framebuffer uses 16x multisampling, it should
    * be equivalent to the normal CMS fetch for lower multisampling modes.
    */
   const opcode op = !wm_key->multisample_fbo ? SHADER_OPCODE_TXF_LOGICAL :
                     devinfo->gen >= 9 ? SHADER_OPCODE_TXF_CMS_W_LOGICAL :
                     SHADER_OPCODE_TXF_CMS_LOGICAL;

   /* Emit the instruction. */
   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE]       = coords;
   srcs[TEX_LOGICAL_SRC_LOD]              = brw_imm_ud(0);
   srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX]     = sample;
   srcs[TEX_LOGICAL_SRC_MCS]              = mcs;
   srcs[TEX_LOGICAL_SRC_SURFACE]          = brw_imm_ud(surface);
   srcs[TEX_LOGICAL_SRC_SAMPLER]          = brw_imm_ud(0);
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS]  = brw_imm_ud(0);

   fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
   /* The fetch always produces a full four-component result. */
   inst->size_written = 4 * inst->dst.component_size(inst->exec_size);

   return inst;
}
|
|
|
|
|
|
2016-08-18 22:12:37 -07:00
|
|
|
/**
|
|
|
|
|
* Actual coherent framebuffer read implemented using the native render target
|
|
|
|
|
* read message. Requires SKL+.
|
|
|
|
|
*/
|
|
|
|
|
/**
 * Actual coherent framebuffer read implemented using the native render target
 * read message.  Requires SKL+.
 *
 * \param bld    Builder used to emit the read.
 * \param dst    Destination register receiving four components.
 * \param target Render target index to read back.
 * \return       The emitted framebuffer-read instruction.
 */
static fs_inst *
emit_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, unsigned target)
{
   /* The native RT read message only exists on gen9 and later. */
   assert(bld.shader->devinfo->gen >= 9);

   fs_inst *const read = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
   read->size_written = 4 * read->dst.component_size(read->exec_size);
   read->target = target;

   return read;
}
|
|
|
|
|
|
2016-07-21 21:26:20 -07:00
|
|
|
/**
 * Return the register already recorded in \p regs[0] if present, otherwise
 * allocate a fresh float VGRF of \p size registers and record it in all
 * \p n entries of \p regs.
 */
static fs_reg
alloc_temporary(const fs_builder &bld, unsigned size, fs_reg *regs, unsigned n)
{
   /* Reuse a previously-allocated temporary when one exists. */
   if (n && regs[0].file != BAD_FILE)
      return regs[0];

   const fs_reg vgrf = bld.vgrf(BRW_REGISTER_TYPE_F, size);

   /* Remember the allocation in every requested slot. */
   for (unsigned j = 0; j < n; j++)
      regs[j] = vgrf;

   return vgrf;
}
|
|
|
|
|
|
|
|
|
|
/**
 * Map an encoded fragment output location to the visitor register that backs
 * it, allocating the backing temporary on first use.
 *
 * \p location packs the FRAG_RESULT_* slot and the dual-source index via
 * BRW_NIR_FRAG_OUTPUT_LOCATION / BRW_NIR_FRAG_OUTPUT_INDEX.
 */
static fs_reg
alloc_frag_output(fs_visitor *v, unsigned location)
{
   assert(v->stage == MESA_SHADER_FRAGMENT);
   const brw_wm_prog_key *const key =
      reinterpret_cast<const brw_wm_prog_key *>(v->key);
   const unsigned loc = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   const unsigned idx = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);

   /* Dual-source blending: index 1 (or DATA1 when forced) writes the
    * dual-source output register.
    */
   if (idx > 0 || (key->force_dual_color_blend && loc == FRAG_RESULT_DATA1))
      return alloc_temporary(v->bld, 4, &v->dual_src_output, 1);

   /* gl_FragColor replicates to every bound color region. */
   if (loc == FRAG_RESULT_COLOR)
      return alloc_temporary(v->bld, 4, v->outputs,
                             MAX2(key->nr_color_regions, 1));

   if (loc == FRAG_RESULT_DEPTH)
      return alloc_temporary(v->bld, 1, &v->frag_depth, 1);

   if (loc == FRAG_RESULT_STENCIL)
      return alloc_temporary(v->bld, 1, &v->frag_stencil, 1);

   if (loc == FRAG_RESULT_SAMPLE_MASK)
      return alloc_temporary(v->bld, 1, &v->sample_mask, 1);

   /* Individual color attachments. */
   if (loc >= FRAG_RESULT_DATA0 &&
       loc < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
      return alloc_temporary(v->bld, 4,
                             &v->outputs[loc - FRAG_RESULT_DATA0], 1);

   unreachable("Invalid location");
}
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
|
|
|
|
|
nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
assert(stage == MESA_SHADER_FRAGMENT);
|
|
|
|
|
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
2014-12-04 12:27:29 -08:00
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
switch (instr->intrinsic) {
|
2015-11-04 23:05:07 -08:00
|
|
|
case nir_intrinsic_load_front_face:
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
|
|
|
|
|
*emit_frontfacing_interpolation());
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_sample_pos: {
|
|
|
|
|
fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
|
|
|
|
|
assert(sample_pos.file != BAD_FILE);
|
|
|
|
|
dest.type = sample_pos.type;
|
|
|
|
|
bld.MOV(dest, sample_pos);
|
|
|
|
|
bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-15 15:18:32 -08:00
|
|
|
case nir_intrinsic_load_layer_id:
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_UD;
|
|
|
|
|
bld.MOV(dest, fetch_render_target_array_index(bld));
|
|
|
|
|
break;
|
|
|
|
|
|
2019-06-07 23:06:27 -07:00
|
|
|
case nir_intrinsic_is_helper_invocation: {
|
|
|
|
|
/* Unlike the regular gl_HelperInvocation, that is defined at dispatch,
|
|
|
|
|
* the helperInvocationEXT() (aka SpvOpIsHelperInvocationEXT) takes into
|
|
|
|
|
* consideration demoted invocations. That information is stored in
|
|
|
|
|
* f0.1.
|
|
|
|
|
*/
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_UD;
|
|
|
|
|
|
|
|
|
|
bld.MOV(dest, brw_imm_ud(0));
|
|
|
|
|
|
|
|
|
|
fs_inst *mov = bld.MOV(dest, brw_imm_ud(~0));
|
|
|
|
|
mov->predicate = BRW_PREDICATE_NORMAL;
|
|
|
|
|
mov->predicate_inverse = true;
|
2020-01-04 14:32:09 -08:00
|
|
|
mov->flag_subreg = sample_mask_flag_subreg(this);
|
2019-06-07 23:06:27 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-13 17:51:12 -08:00
|
|
|
case nir_intrinsic_load_helper_invocation:
|
2015-11-04 23:05:07 -08:00
|
|
|
case nir_intrinsic_load_sample_mask_in:
|
|
|
|
|
case nir_intrinsic_load_sample_id: {
|
|
|
|
|
gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
|
|
|
|
|
fs_reg val = nir_system_values[sv];
|
|
|
|
|
assert(val.file != BAD_FILE);
|
|
|
|
|
dest.type = val.type;
|
|
|
|
|
bld.MOV(dest, val);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-21 21:25:46 -07:00
|
|
|
case nir_intrinsic_store_output: {
|
|
|
|
|
const fs_reg src = get_nir_src(instr->src[0]);
|
2018-10-20 09:55:28 -05:00
|
|
|
const unsigned store_offset = nir_src_as_uint(instr->src[1]);
|
2016-07-21 21:26:20 -07:00
|
|
|
const unsigned location = nir_intrinsic_base(instr) +
|
2018-10-20 09:55:28 -05:00
|
|
|
SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
|
2016-07-21 21:47:45 -07:00
|
|
|
const fs_reg new_dest = retype(alloc_frag_output(this, location),
|
2016-07-21 21:26:20 -07:00
|
|
|
src.type);
|
2016-07-21 21:25:46 -07:00
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < instr->num_components; j++)
|
|
|
|
|
bld.MOV(offset(new_dest, bld, nir_intrinsic_component(instr) + j),
|
|
|
|
|
offset(src, bld, j));
|
|
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-21 21:57:00 -07:00
|
|
|
case nir_intrinsic_load_output: {
|
|
|
|
|
const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
|
|
|
|
|
BRW_NIR_FRAG_OUTPUT_LOCATION);
|
|
|
|
|
assert(l >= FRAG_RESULT_DATA0);
|
2018-10-20 09:55:28 -05:00
|
|
|
const unsigned load_offset = nir_src_as_uint(instr->src[0]);
|
|
|
|
|
const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
|
2016-07-21 21:57:00 -07:00
|
|
|
const fs_reg tmp = bld.vgrf(dest.type, 4);
|
|
|
|
|
|
2016-08-18 22:12:37 -07:00
|
|
|
if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
|
|
|
|
|
emit_coherent_fb_read(bld, tmp, target);
|
|
|
|
|
else
|
|
|
|
|
emit_non_coherent_fb_read(bld, tmp, target);
|
2016-07-21 21:57:00 -07:00
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < instr->num_components; j++) {
|
|
|
|
|
bld.MOV(offset(dest, bld, j),
|
|
|
|
|
offset(tmp, bld, nir_intrinsic_component(instr) + j));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-07 23:06:27 -07:00
|
|
|
case nir_intrinsic_demote:
|
2014-08-19 15:22:43 -07:00
|
|
|
case nir_intrinsic_discard:
|
2019-07-18 13:39:49 +02:00
|
|
|
case nir_intrinsic_demote_if:
|
2014-08-19 15:22:43 -07:00
|
|
|
case nir_intrinsic_discard_if: {
|
2020-01-04 15:48:07 -08:00
|
|
|
/* We track our discarded pixels in f0.1/f1.0. By predicating on it, we
|
|
|
|
|
* can update just the flag bits that aren't yet discarded. If there's
|
|
|
|
|
* no condition, we emit a CMP of g0 != g0, so all currently executing
|
2014-08-19 15:22:43 -07:00
|
|
|
* channels will get turned off.
|
2014-08-15 10:32:07 -07:00
|
|
|
*/
|
intel/fs: Improve discard_if code generation
Previously we would blindly emit a sequence like:
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.z.f0.1(16) null<1>D g7<8,8,1>D 0D
The first move sets the flags based on the initial execution mask.
Later discard sequences contain a predicated compare that can only
remove more SIMD channels. Oftentimes the only user of the result from
the first compare is the second compare. Instead, generate a sequence
like
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.ge.f0.1(8) null<1>F g5<8,8,1>F 0x41700000F /* 15F */
If the results stored in g7 and f0.0 are not used, the comparison will
be eliminated. This removes an instruction and potentially reduces
register pressure.
v2: Major re-write of the commit message (including fixing the assembly
code). Suggested by Matt.
All Gen8+ platforms had similar results. (Ice Lake shown)
total instructions in shared programs: 17224434 -> 17198659 (-0.15%)
instructions in affected programs: 2908125 -> 2882350 (-0.89%)
helped: 18891
HURT: 5
helped stats (abs) min: 1 max: 12 x̄: 1.38 x̃: 1
helped stats (rel) min: 0.03% max: 25.00% x̄: 1.76% x̃: 1.02%
HURT stats (abs) min: 9 max: 105 x̄: 51.40 x̃: 35
HURT stats (rel) min: 0.43% max: 4.92% x̄: 2.34% x̃: 1.56%
95% mean confidence interval for instructions value: -1.39 -1.34
95% mean confidence interval for instructions %-change: -1.79% -1.73%
Instructions are helped.
total cycles in shared programs: 361468458 -> 361170679 (-0.08%)
cycles in affected programs: 38470116 -> 38172337 (-0.77%)
helped: 16202
HURT: 1456
helped stats (abs) min: 1 max: 4473 x̄: 26.24 x̃: 18
helped stats (rel) min: <.01% max: 28.44% x̄: 2.90% x̃: 2.18%
HURT stats (abs) min: 1 max: 5982 x̄: 87.51 x̃: 28
HURT stats (rel) min: <.01% max: 51.29% x̄: 5.48% x̃: 1.64%
95% mean confidence interval for cycles value: -18.24 -15.49
95% mean confidence interval for cycles %-change: -2.26% -2.14%
Cycles are helped.
total spills in shared programs: 12147 -> 12176 (0.24%)
spills in affected programs: 175 -> 204 (16.57%)
helped: 8
HURT: 5
total fills in shared programs: 25262 -> 25292 (0.12%)
fills in affected programs: 269 -> 299 (11.15%)
helped: 8
HURT: 5
Haswell
total instructions in shared programs: 13530316 -> 13502647 (-0.20%)
instructions in affected programs: 2507824 -> 2480155 (-1.10%)
helped: 18859
HURT: 10
helped stats (abs) min: 1 max: 12 x̄: 1.48 x̃: 1
helped stats (rel) min: 0.03% max: 27.78% x̄: 2.38% x̃: 1.41%
HURT stats (abs) min: 5 max: 39 x̄: 25.70 x̃: 31
HURT stats (rel) min: 0.22% max: 1.66% x̄: 1.09% x̃: 1.31%
95% mean confidence interval for instructions value: -1.49 -1.44
95% mean confidence interval for instructions %-change: -2.42% -2.34%
Instructions are helped.
total cycles in shared programs: 377865412 -> 377639034 (-0.06%)
cycles in affected programs: 40169572 -> 39943194 (-0.56%)
helped: 15550
HURT: 1938
helped stats (abs) min: 1 max: 2482 x̄: 25.67 x̃: 18
helped stats (rel) min: <.01% max: 37.77% x̄: 3.00% x̃: 2.25%
HURT stats (abs) min: 1 max: 4862 x̄: 89.17 x̃: 35
HURT stats (rel) min: <.01% max: 67.67% x̄: 6.16% x̃: 2.75%
95% mean confidence interval for cycles value: -14.42 -11.47
95% mean confidence interval for cycles %-change: -2.05% -1.91%
Cycles are helped.
total spills in shared programs: 26769 -> 26814 (0.17%)
spills in affected programs: 826 -> 871 (5.45%)
helped: 9
HURT: 10
total fills in shared programs: 38383 -> 38425 (0.11%)
fills in affected programs: 834 -> 876 (5.04%)
helped: 9
HURT: 10
LOST: 5
GAINED: 10
Ivy Bridge
total instructions in shared programs: 12079250 -> 12044139 (-0.29%)
instructions in affected programs: 2409680 -> 2374569 (-1.46%)
helped: 16135
HURT: 0
helped stats (abs) min: 1 max: 23 x̄: 2.18 x̃: 2
helped stats (rel) min: 0.07% max: 37.50% x̄: 2.72% x̃: 1.68%
95% mean confidence interval for instructions value: -2.21 -2.14
95% mean confidence interval for instructions %-change: -2.76% -2.67%
Instructions are helped.
total cycles in shared programs: 180116747 -> 179900405 (-0.12%)
cycles in affected programs: 25439823 -> 25223481 (-0.85%)
helped: 13817
HURT: 1499
helped stats (abs) min: 1 max: 1886 x̄: 26.40 x̃: 18
helped stats (rel) min: <.01% max: 38.84% x̄: 2.57% x̃: 1.97%
HURT stats (abs) min: 1 max: 3684 x̄: 98.99 x̃: 52
HURT stats (rel) min: <.01% max: 97.01% x̄: 6.37% x̃: 3.42%
95% mean confidence interval for cycles value: -15.68 -12.57
95% mean confidence interval for cycles %-change: -1.77% -1.63%
Cycles are helped.
LOST: 8
GAINED: 10
Sandy Bridge
total instructions in shared programs: 10878990 -> 10863659 (-0.14%)
instructions in affected programs: 1806702 -> 1791371 (-0.85%)
helped: 13023
HURT: 0
helped stats (abs) min: 1 max: 5 x̄: 1.18 x̃: 1
helped stats (rel) min: 0.07% max: 13.79% x̄: 1.65% x̃: 1.10%
95% mean confidence interval for instructions value: -1.18 -1.17
95% mean confidence interval for instructions %-change: -1.68% -1.62%
Instructions are helped.
total cycles in shared programs: 154082878 -> 153862810 (-0.14%)
cycles in affected programs: 20199374 -> 19979306 (-1.09%)
helped: 12048
HURT: 510
helped stats (abs) min: 1 max: 323 x̄: 20.57 x̃: 18
helped stats (rel) min: 0.03% max: 17.78% x̄: 2.05% x̃: 1.52%
HURT stats (abs) min: 1 max: 448 x̄: 54.39 x̃: 16
HURT stats (rel) min: 0.02% max: 37.98% x̄: 4.13% x̃: 1.17%
95% mean confidence interval for cycles value: -17.97 -17.08
95% mean confidence interval for cycles %-change: -1.84% -1.75%
Cycles are helped.
LOST: 1
GAINED: 0
Iron Lake
total instructions in shared programs: 8155075 -> 8142729 (-0.15%)
instructions in affected programs: 949495 -> 937149 (-1.30%)
helped: 5810
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.12 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.53% x̃: 1.85%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.59% -2.48%
Instructions are helped.
total cycles in shared programs: 188584610 -> 188549632 (-0.02%)
cycles in affected programs: 17274446 -> 17239468 (-0.20%)
helped: 3881
HURT: 90
helped stats (abs) min: 2 max: 168 x̄: 9.08 x̃: 6
helped stats (rel) min: <.01% max: 23.53% x̄: 0.83% x̃: 0.30%
HURT stats (abs) min: 2 max: 10 x̄: 2.80 x̃: 2
HURT stats (rel) min: <.01% max: 0.60% x̄: 0.10% x̃: 0.07%
95% mean confidence interval for cycles value: -9.35 -8.27
95% mean confidence interval for cycles %-change: -0.85% -0.77%
Cycles are helped.
GM45
total instructions in shared programs: 5019308 -> 5013119 (-0.12%)
instructions in affected programs: 489028 -> 482839 (-1.27%)
helped: 2912
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.13 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.46% x̃: 1.81%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.54% -2.39%
Instructions are helped.
total cycles in shared programs: 129002592 -> 128977804 (-0.02%)
cycles in affected programs: 12669152 -> 12644364 (-0.20%)
helped: 2759
HURT: 37
helped stats (abs) min: 2 max: 168 x̄: 9.03 x̃: 4
helped stats (rel) min: <.01% max: 21.43% x̄: 0.75% x̃: 0.31%
HURT stats (abs) min: 2 max: 10 x̄: 3.62 x̃: 4
HURT stats (rel) min: <.01% max: 0.41% x̄: 0.10% x̃: 0.04%
95% mean confidence interval for cycles value: -9.53 -8.20
95% mean confidence interval for cycles %-change: -0.79% -0.70%
Cycles are helped.
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2019-05-20 17:25:01 -07:00
|
|
|
fs_inst *cmp = NULL;
|
2019-07-18 13:39:49 +02:00
|
|
|
if (instr->intrinsic == nir_intrinsic_demote_if ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_discard_if) {
|
intel/fs: Improve discard_if code generation
Previously we would blindly emit a sequence like:
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.z.f0.1(16) null<1>D g7<8,8,1>D 0D
The first move sets the flags based on the initial execution mask.
Later discard sequences contain a predicated compare that can only
remove more SIMD channels. Oftentimes the only user of the result from
the first compare is the second compare. Instead, generate a sequence
like
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.ge.f0.1(8) null<1>F g5<8,8,1>F 0x41700000F /* 15F */
If the results stored in g7 and f0.0 are not used, the comparison will
be eliminated. This removes an instruction and potentially reduces
register pressure.
v2: Major re-write of the commit message (including fixing the assembly
code). Suggested by Matt.
All Gen8+ platforms had similar results. (Ice Lake shown)
total instructions in shared programs: 17224434 -> 17198659 (-0.15%)
instructions in affected programs: 2908125 -> 2882350 (-0.89%)
helped: 18891
HURT: 5
helped stats (abs) min: 1 max: 12 x̄: 1.38 x̃: 1
helped stats (rel) min: 0.03% max: 25.00% x̄: 1.76% x̃: 1.02%
HURT stats (abs) min: 9 max: 105 x̄: 51.40 x̃: 35
HURT stats (rel) min: 0.43% max: 4.92% x̄: 2.34% x̃: 1.56%
95% mean confidence interval for instructions value: -1.39 -1.34
95% mean confidence interval for instructions %-change: -1.79% -1.73%
Instructions are helped.
total cycles in shared programs: 361468458 -> 361170679 (-0.08%)
cycles in affected programs: 38470116 -> 38172337 (-0.77%)
helped: 16202
HURT: 1456
helped stats (abs) min: 1 max: 4473 x̄: 26.24 x̃: 18
helped stats (rel) min: <.01% max: 28.44% x̄: 2.90% x̃: 2.18%
HURT stats (abs) min: 1 max: 5982 x̄: 87.51 x̃: 28
HURT stats (rel) min: <.01% max: 51.29% x̄: 5.48% x̃: 1.64%
95% mean confidence interval for cycles value: -18.24 -15.49
95% mean confidence interval for cycles %-change: -2.26% -2.14%
Cycles are helped.
total spills in shared programs: 12147 -> 12176 (0.24%)
spills in affected programs: 175 -> 204 (16.57%)
helped: 8
HURT: 5
total fills in shared programs: 25262 -> 25292 (0.12%)
fills in affected programs: 269 -> 299 (11.15%)
helped: 8
HURT: 5
Haswell
total instructions in shared programs: 13530316 -> 13502647 (-0.20%)
instructions in affected programs: 2507824 -> 2480155 (-1.10%)
helped: 18859
HURT: 10
helped stats (abs) min: 1 max: 12 x̄: 1.48 x̃: 1
helped stats (rel) min: 0.03% max: 27.78% x̄: 2.38% x̃: 1.41%
HURT stats (abs) min: 5 max: 39 x̄: 25.70 x̃: 31
HURT stats (rel) min: 0.22% max: 1.66% x̄: 1.09% x̃: 1.31%
95% mean confidence interval for instructions value: -1.49 -1.44
95% mean confidence interval for instructions %-change: -2.42% -2.34%
Instructions are helped.
total cycles in shared programs: 377865412 -> 377639034 (-0.06%)
cycles in affected programs: 40169572 -> 39943194 (-0.56%)
helped: 15550
HURT: 1938
helped stats (abs) min: 1 max: 2482 x̄: 25.67 x̃: 18
helped stats (rel) min: <.01% max: 37.77% x̄: 3.00% x̃: 2.25%
HURT stats (abs) min: 1 max: 4862 x̄: 89.17 x̃: 35
HURT stats (rel) min: <.01% max: 67.67% x̄: 6.16% x̃: 2.75%
95% mean confidence interval for cycles value: -14.42 -11.47
95% mean confidence interval for cycles %-change: -2.05% -1.91%
Cycles are helped.
total spills in shared programs: 26769 -> 26814 (0.17%)
spills in affected programs: 826 -> 871 (5.45%)
helped: 9
HURT: 10
total fills in shared programs: 38383 -> 38425 (0.11%)
fills in affected programs: 834 -> 876 (5.04%)
helped: 9
HURT: 10
LOST: 5
GAINED: 10
Ivy Bridge
total instructions in shared programs: 12079250 -> 12044139 (-0.29%)
instructions in affected programs: 2409680 -> 2374569 (-1.46%)
helped: 16135
HURT: 0
helped stats (abs) min: 1 max: 23 x̄: 2.18 x̃: 2
helped stats (rel) min: 0.07% max: 37.50% x̄: 2.72% x̃: 1.68%
95% mean confidence interval for instructions value: -2.21 -2.14
95% mean confidence interval for instructions %-change: -2.76% -2.67%
Instructions are helped.
total cycles in shared programs: 180116747 -> 179900405 (-0.12%)
cycles in affected programs: 25439823 -> 25223481 (-0.85%)
helped: 13817
HURT: 1499
helped stats (abs) min: 1 max: 1886 x̄: 26.40 x̃: 18
helped stats (rel) min: <.01% max: 38.84% x̄: 2.57% x̃: 1.97%
HURT stats (abs) min: 1 max: 3684 x̄: 98.99 x̃: 52
HURT stats (rel) min: <.01% max: 97.01% x̄: 6.37% x̃: 3.42%
95% mean confidence interval for cycles value: -15.68 -12.57
95% mean confidence interval for cycles %-change: -1.77% -1.63%
Cycles are helped.
LOST: 8
GAINED: 10
Sandy Bridge
total instructions in shared programs: 10878990 -> 10863659 (-0.14%)
instructions in affected programs: 1806702 -> 1791371 (-0.85%)
helped: 13023
HURT: 0
helped stats (abs) min: 1 max: 5 x̄: 1.18 x̃: 1
helped stats (rel) min: 0.07% max: 13.79% x̄: 1.65% x̃: 1.10%
95% mean confidence interval for instructions value: -1.18 -1.17
95% mean confidence interval for instructions %-change: -1.68% -1.62%
Instructions are helped.
total cycles in shared programs: 154082878 -> 153862810 (-0.14%)
cycles in affected programs: 20199374 -> 19979306 (-1.09%)
helped: 12048
HURT: 510
helped stats (abs) min: 1 max: 323 x̄: 20.57 x̃: 18
helped stats (rel) min: 0.03% max: 17.78% x̄: 2.05% x̃: 1.52%
HURT stats (abs) min: 1 max: 448 x̄: 54.39 x̃: 16
HURT stats (rel) min: 0.02% max: 37.98% x̄: 4.13% x̃: 1.17%
95% mean confidence interval for cycles value: -17.97 -17.08
95% mean confidence interval for cycles %-change: -1.84% -1.75%
Cycles are helped.
LOST: 1
GAINED: 0
Iron Lake
total instructions in shared programs: 8155075 -> 8142729 (-0.15%)
instructions in affected programs: 949495 -> 937149 (-1.30%)
helped: 5810
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.12 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.53% x̃: 1.85%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.59% -2.48%
Instructions are helped.
total cycles in shared programs: 188584610 -> 188549632 (-0.02%)
cycles in affected programs: 17274446 -> 17239468 (-0.20%)
helped: 3881
HURT: 90
helped stats (abs) min: 2 max: 168 x̄: 9.08 x̃: 6
helped stats (rel) min: <.01% max: 23.53% x̄: 0.83% x̃: 0.30%
HURT stats (abs) min: 2 max: 10 x̄: 2.80 x̃: 2
HURT stats (rel) min: <.01% max: 0.60% x̄: 0.10% x̃: 0.07%
95% mean confidence interval for cycles value: -9.35 -8.27
95% mean confidence interval for cycles %-change: -0.85% -0.77%
Cycles are helped.
GM45
total instructions in shared programs: 5019308 -> 5013119 (-0.12%)
instructions in affected programs: 489028 -> 482839 (-1.27%)
helped: 2912
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.13 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.46% x̃: 1.81%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.54% -2.39%
Instructions are helped.
total cycles in shared programs: 129002592 -> 128977804 (-0.02%)
cycles in affected programs: 12669152 -> 12644364 (-0.20%)
helped: 2759
HURT: 37
helped stats (abs) min: 2 max: 168 x̄: 9.03 x̃: 4
helped stats (rel) min: <.01% max: 21.43% x̄: 0.75% x̃: 0.31%
HURT stats (abs) min: 2 max: 10 x̄: 3.62 x̃: 4
HURT stats (rel) min: <.01% max: 0.41% x̄: 0.10% x̃: 0.04%
95% mean confidence interval for cycles value: -9.53 -8.20
95% mean confidence interval for cycles %-change: -0.79% -0.70%
Cycles are helped.
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2019-05-20 17:25:01 -07:00
|
|
|
nir_alu_instr *alu = nir_src_as_alu_instr(instr->src[0]);
|
|
|
|
|
|
|
|
|
|
if (alu != NULL &&
|
|
|
|
|
alu->op != nir_op_bcsel &&
|
intel/fs: Disable conditional discard optimization on Gen4 and Gen5
The CMP instruction on Gen4 and Gen5 generates one bit (the LSB) of
valid data and 31 bits of junk. Results of comparisons that are used as
Boolean values need to have a fixup applied to generate the proper 0/~0
values.
Calling fs_visitor::nir_emit_alu with need_dest=false prevents the fixup
code from being generated. This results in a sequence like:
cmp.l.f0.0(16) g8<1>F g14<8,8,1>F 0x0F /* 0F */
...
cmp.l.f0.0(16) g4<1>F g6<8,8,1>F 0x0F /* 0F */
(+f0.1) or.z.f0.1(16) null<1>UD g4<8,8,1>UD g8<8,8,1>UD
instead of
cmp.l.f0.0(16) g8<1>F g14<8,8,1>F 0x0F /* 0F */
...
cmp.l.f0.0(16) g4<1>F g6<8,8,1>F 0x0F /* 0F */
or(16) g4<1>UD g4<8,8,1>UD g8<8,8,1>UD
(+f0.1) and.z.f0.1(16) null<1>UD g4<8,8,1>UD 1UD
I examined a couple of the shaders hurt by this change, and ALL of them
would have been affected by this bug. :(
Reviewed-by: Tapani Pälli <tapani.palli@intel.com>
Closes: https://gitlab.freedesktop.org/mesa/mesa/issues/1836
Fixes: 0ba9497e66a ("intel/fs: Improve discard_if code generation")
Iron Lake
total instructions in shared programs: 8122757 -> 8122957 (<.01%)
instructions in affected programs: 8307 -> 8507 (2.41%)
helped: 0
HURT: 100
HURT stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
HURT stats (rel) min: 0.84% max: 6.67% x̄: 2.81% x̃: 2.76%
95% mean confidence interval for instructions value: 2.00 2.00
95% mean confidence interval for instructions %-change: 2.58% 3.03%
Instructions are HURT.
total cycles in shared programs: 188510100 -> 188510376 (<.01%)
cycles in affected programs: 76018 -> 76294 (0.36%)
helped: 0
HURT: 55
HURT stats (abs) min: 2 max: 12 x̄: 5.02 x̃: 4
HURT stats (rel) min: 0.07% max: 3.75% x̄: 0.86% x̃: 0.56%
95% mean confidence interval for cycles value: 4.33 5.71
95% mean confidence interval for cycles %-change: 0.60% 1.12%
Cycles are HURT.
GM45
total instructions in shared programs: 4994403 -> 4994503 (<.01%)
instructions in affected programs: 4212 -> 4312 (2.37%)
helped: 0
HURT: 50
HURT stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
HURT stats (rel) min: 0.84% max: 6.25% x̄: 2.76% x̃: 2.72%
95% mean confidence interval for instructions value: 2.00 2.00
95% mean confidence interval for instructions %-change: 2.45% 3.07%
Instructions are HURT.
total cycles in shared programs: 128928750 -> 128928982 (<.01%)
cycles in affected programs: 67442 -> 67674 (0.34%)
helped: 0
HURT: 47
HURT stats (abs) min: 2 max: 12 x̄: 4.94 x̃: 4
HURT stats (rel) min: 0.09% max: 3.75% x̄: 0.75% x̃: 0.53%
95% mean confidence interval for cycles value: 4.19 5.68
95% mean confidence interval for cycles %-change: 0.50% 1.00%
Cycles are HURT.
2019-11-18 11:52:47 -08:00
|
|
|
(devinfo->gen > 5 ||
|
|
|
|
|
(alu->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) != BRW_NIR_BOOLEAN_NEEDS_RESOLVE ||
|
2020-08-18 19:51:57 +02:00
|
|
|
alu->op == nir_op_fneu32 || alu->op == nir_op_feq32 ||
|
intel/fs: Disable conditional discard optimization on Gen4 and Gen5
The CMP instruction on Gen4 and Gen5 generates one bit (the LSB) of
valid data and 31 bits of junk. Results of comparisons that are used as
Boolean values need to have a fixup applied to generate the proper 0/~0
values.
Calling fs_visitor::nir_emit_alu with need_dest=false prevents the fixup
code from being generated. This results in a sequence like:
cmp.l.f0.0(16) g8<1>F g14<8,8,1>F 0x0F /* 0F */
...
cmp.l.f0.0(16) g4<1>F g6<8,8,1>F 0x0F /* 0F */
(+f0.1) or.z.f0.1(16) null<1>UD g4<8,8,1>UD g8<8,8,1>UD
instead of
cmp.l.f0.0(16) g8<1>F g14<8,8,1>F 0x0F /* 0F */
...
cmp.l.f0.0(16) g4<1>F g6<8,8,1>F 0x0F /* 0F */
or(16) g4<1>UD g4<8,8,1>UD g8<8,8,1>UD
(+f0.1) and.z.f0.1(16) null<1>UD g4<8,8,1>UD 1UD
I examined a couple of the shaders hurt by this change, and ALL of them
would have been affected by this bug. :(
Reviewed-by: Tapani Pälli <tapani.palli@intel.com>
Closes: https://gitlab.freedesktop.org/mesa/mesa/issues/1836
Fixes: 0ba9497e66a ("intel/fs: Improve discard_if code generation")
Iron Lake
total instructions in shared programs: 8122757 -> 8122957 (<.01%)
instructions in affected programs: 8307 -> 8507 (2.41%)
helped: 0
HURT: 100
HURT stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
HURT stats (rel) min: 0.84% max: 6.67% x̄: 2.81% x̃: 2.76%
95% mean confidence interval for instructions value: 2.00 2.00
95% mean confidence interval for instructions %-change: 2.58% 3.03%
Instructions are HURT.
total cycles in shared programs: 188510100 -> 188510376 (<.01%)
cycles in affected programs: 76018 -> 76294 (0.36%)
helped: 0
HURT: 55
HURT stats (abs) min: 2 max: 12 x̄: 5.02 x̃: 4
HURT stats (rel) min: 0.07% max: 3.75% x̄: 0.86% x̃: 0.56%
95% mean confidence interval for cycles value: 4.33 5.71
95% mean confidence interval for cycles %-change: 0.60% 1.12%
Cycles are HURT.
GM45
total instructions in shared programs: 4994403 -> 4994503 (<.01%)
instructions in affected programs: 4212 -> 4312 (2.37%)
helped: 0
HURT: 50
HURT stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
HURT stats (rel) min: 0.84% max: 6.25% x̄: 2.76% x̃: 2.72%
95% mean confidence interval for instructions value: 2.00 2.00
95% mean confidence interval for instructions %-change: 2.45% 3.07%
Instructions are HURT.
total cycles in shared programs: 128928750 -> 128928982 (<.01%)
cycles in affected programs: 67442 -> 67674 (0.34%)
helped: 0
HURT: 47
HURT stats (abs) min: 2 max: 12 x̄: 4.94 x̃: 4
HURT stats (rel) min: 0.09% max: 3.75% x̄: 0.75% x̃: 0.53%
95% mean confidence interval for cycles value: 4.19 5.68
95% mean confidence interval for cycles %-change: 0.50% 1.00%
Cycles are HURT.
2019-11-18 11:52:47 -08:00
|
|
|
alu->op == nir_op_flt32 || alu->op == nir_op_fge32 ||
|
|
|
|
|
alu->op == nir_op_ine32 || alu->op == nir_op_ieq32 ||
|
|
|
|
|
alu->op == nir_op_ilt32 || alu->op == nir_op_ige32 ||
|
|
|
|
|
alu->op == nir_op_ult32 || alu->op == nir_op_uge32)) {
|
intel/fs: Improve discard_if code generation
Previously we would blindly emit a sequence like:
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.z.f0.1(16) null<1>D g7<8,8,1>D 0D
The first move sets the flags based on the initial execution mask.
Later discard sequences contain a predicated compare that can only
remove more SIMD channels. Oftentimes the only user of the result from
the first compare is the second compare. Instead, generate a sequence
like
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.ge.f0.1(8) null<1>F g5<8,8,1>F 0x41700000F /* 15F */
If the results stored in g7 and f0.0 are not used, the comparison will
be eliminated. This removes an instruction and potentially reduces
register pressure.
v2: Major re-write of the commit message (including fixing the assembly
code). Suggested by Matt.
All Gen8+ platforms had similar results. (Ice Lake shown)
total instructions in shared programs: 17224434 -> 17198659 (-0.15%)
instructions in affected programs: 2908125 -> 2882350 (-0.89%)
helped: 18891
HURT: 5
helped stats (abs) min: 1 max: 12 x̄: 1.38 x̃: 1
helped stats (rel) min: 0.03% max: 25.00% x̄: 1.76% x̃: 1.02%
HURT stats (abs) min: 9 max: 105 x̄: 51.40 x̃: 35
HURT stats (rel) min: 0.43% max: 4.92% x̄: 2.34% x̃: 1.56%
95% mean confidence interval for instructions value: -1.39 -1.34
95% mean confidence interval for instructions %-change: -1.79% -1.73%
Instructions are helped.
total cycles in shared programs: 361468458 -> 361170679 (-0.08%)
cycles in affected programs: 38470116 -> 38172337 (-0.77%)
helped: 16202
HURT: 1456
helped stats (abs) min: 1 max: 4473 x̄: 26.24 x̃: 18
helped stats (rel) min: <.01% max: 28.44% x̄: 2.90% x̃: 2.18%
HURT stats (abs) min: 1 max: 5982 x̄: 87.51 x̃: 28
HURT stats (rel) min: <.01% max: 51.29% x̄: 5.48% x̃: 1.64%
95% mean confidence interval for cycles value: -18.24 -15.49
95% mean confidence interval for cycles %-change: -2.26% -2.14%
Cycles are helped.
total spills in shared programs: 12147 -> 12176 (0.24%)
spills in affected programs: 175 -> 204 (16.57%)
helped: 8
HURT: 5
total fills in shared programs: 25262 -> 25292 (0.12%)
fills in affected programs: 269 -> 299 (11.15%)
helped: 8
HURT: 5
Haswell
total instructions in shared programs: 13530316 -> 13502647 (-0.20%)
instructions in affected programs: 2507824 -> 2480155 (-1.10%)
helped: 18859
HURT: 10
helped stats (abs) min: 1 max: 12 x̄: 1.48 x̃: 1
helped stats (rel) min: 0.03% max: 27.78% x̄: 2.38% x̃: 1.41%
HURT stats (abs) min: 5 max: 39 x̄: 25.70 x̃: 31
HURT stats (rel) min: 0.22% max: 1.66% x̄: 1.09% x̃: 1.31%
95% mean confidence interval for instructions value: -1.49 -1.44
95% mean confidence interval for instructions %-change: -2.42% -2.34%
Instructions are helped.
total cycles in shared programs: 377865412 -> 377639034 (-0.06%)
cycles in affected programs: 40169572 -> 39943194 (-0.56%)
helped: 15550
HURT: 1938
helped stats (abs) min: 1 max: 2482 x̄: 25.67 x̃: 18
helped stats (rel) min: <.01% max: 37.77% x̄: 3.00% x̃: 2.25%
HURT stats (abs) min: 1 max: 4862 x̄: 89.17 x̃: 35
HURT stats (rel) min: <.01% max: 67.67% x̄: 6.16% x̃: 2.75%
95% mean confidence interval for cycles value: -14.42 -11.47
95% mean confidence interval for cycles %-change: -2.05% -1.91%
Cycles are helped.
total spills in shared programs: 26769 -> 26814 (0.17%)
spills in affected programs: 826 -> 871 (5.45%)
helped: 9
HURT: 10
total fills in shared programs: 38383 -> 38425 (0.11%)
fills in affected programs: 834 -> 876 (5.04%)
helped: 9
HURT: 10
LOST: 5
GAINED: 10
Ivy Bridge
total instructions in shared programs: 12079250 -> 12044139 (-0.29%)
instructions in affected programs: 2409680 -> 2374569 (-1.46%)
helped: 16135
HURT: 0
helped stats (abs) min: 1 max: 23 x̄: 2.18 x̃: 2
helped stats (rel) min: 0.07% max: 37.50% x̄: 2.72% x̃: 1.68%
95% mean confidence interval for instructions value: -2.21 -2.14
95% mean confidence interval for instructions %-change: -2.76% -2.67%
Instructions are helped.
total cycles in shared programs: 180116747 -> 179900405 (-0.12%)
cycles in affected programs: 25439823 -> 25223481 (-0.85%)
helped: 13817
HURT: 1499
helped stats (abs) min: 1 max: 1886 x̄: 26.40 x̃: 18
helped stats (rel) min: <.01% max: 38.84% x̄: 2.57% x̃: 1.97%
HURT stats (abs) min: 1 max: 3684 x̄: 98.99 x̃: 52
HURT stats (rel) min: <.01% max: 97.01% x̄: 6.37% x̃: 3.42%
95% mean confidence interval for cycles value: -15.68 -12.57
95% mean confidence interval for cycles %-change: -1.77% -1.63%
Cycles are helped.
LOST: 8
GAINED: 10
Sandy Bridge
total instructions in shared programs: 10878990 -> 10863659 (-0.14%)
instructions in affected programs: 1806702 -> 1791371 (-0.85%)
helped: 13023
HURT: 0
helped stats (abs) min: 1 max: 5 x̄: 1.18 x̃: 1
helped stats (rel) min: 0.07% max: 13.79% x̄: 1.65% x̃: 1.10%
95% mean confidence interval for instructions value: -1.18 -1.17
95% mean confidence interval for instructions %-change: -1.68% -1.62%
Instructions are helped.
total cycles in shared programs: 154082878 -> 153862810 (-0.14%)
cycles in affected programs: 20199374 -> 19979306 (-1.09%)
helped: 12048
HURT: 510
helped stats (abs) min: 1 max: 323 x̄: 20.57 x̃: 18
helped stats (rel) min: 0.03% max: 17.78% x̄: 2.05% x̃: 1.52%
HURT stats (abs) min: 1 max: 448 x̄: 54.39 x̃: 16
HURT stats (rel) min: 0.02% max: 37.98% x̄: 4.13% x̃: 1.17%
95% mean confidence interval for cycles value: -17.97 -17.08
95% mean confidence interval for cycles %-change: -1.84% -1.75%
Cycles are helped.
LOST: 1
GAINED: 0
Iron Lake
total instructions in shared programs: 8155075 -> 8142729 (-0.15%)
instructions in affected programs: 949495 -> 937149 (-1.30%)
helped: 5810
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.12 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.53% x̃: 1.85%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.59% -2.48%
Instructions are helped.
total cycles in shared programs: 188584610 -> 188549632 (-0.02%)
cycles in affected programs: 17274446 -> 17239468 (-0.20%)
helped: 3881
HURT: 90
helped stats (abs) min: 2 max: 168 x̄: 9.08 x̃: 6
helped stats (rel) min: <.01% max: 23.53% x̄: 0.83% x̃: 0.30%
HURT stats (abs) min: 2 max: 10 x̄: 2.80 x̃: 2
HURT stats (rel) min: <.01% max: 0.60% x̄: 0.10% x̃: 0.07%
95% mean confidence interval for cycles value: -9.35 -8.27
95% mean confidence interval for cycles %-change: -0.85% -0.77%
Cycles are helped.
GM45
total instructions in shared programs: 5019308 -> 5013119 (-0.12%)
instructions in affected programs: 489028 -> 482839 (-1.27%)
helped: 2912
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.13 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.46% x̃: 1.81%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.54% -2.39%
Instructions are helped.
total cycles in shared programs: 129002592 -> 128977804 (-0.02%)
cycles in affected programs: 12669152 -> 12644364 (-0.20%)
helped: 2759
HURT: 37
helped stats (abs) min: 2 max: 168 x̄: 9.03 x̃: 4
helped stats (rel) min: <.01% max: 21.43% x̄: 0.75% x̃: 0.31%
HURT stats (abs) min: 2 max: 10 x̄: 3.62 x̃: 4
HURT stats (rel) min: <.01% max: 0.41% x̄: 0.10% x̃: 0.04%
95% mean confidence interval for cycles value: -9.53 -8.20
95% mean confidence interval for cycles %-change: -0.79% -0.70%
Cycles are helped.
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2019-05-20 17:25:01 -07:00
|
|
|
/* Re-emit the instruction that generated the Boolean value, but
|
|
|
|
|
* do not store it. Since this instruction will be conditional,
|
|
|
|
|
* other instructions that want to use the real Boolean value may
|
|
|
|
|
* get garbage. This was a problem for piglit's fs-discard-exit-2
|
|
|
|
|
* test.
|
|
|
|
|
*
|
|
|
|
|
* Ideally we'd detect that the instruction cannot have a
|
|
|
|
|
* conditional modifier before emitting the instructions. Alas,
|
|
|
|
|
* that is nigh impossible. Instead, we're going to assume the
|
|
|
|
|
* instruction (or last instruction) generated can have a
|
|
|
|
|
* conditional modifier. If it cannot, fallback to the old-style
|
|
|
|
|
* compare, and hope dead code elimination will clean up the
|
|
|
|
|
* extra instructions generated.
|
|
|
|
|
*/
|
|
|
|
|
nir_emit_alu(bld, alu, false);
|
|
|
|
|
|
|
|
|
|
cmp = (fs_inst *) instructions.get_tail();
|
|
|
|
|
if (cmp->conditional_mod == BRW_CONDITIONAL_NONE) {
|
|
|
|
|
if (cmp->can_do_cmod())
|
|
|
|
|
cmp->conditional_mod = BRW_CONDITIONAL_Z;
|
|
|
|
|
else
|
|
|
|
|
cmp = NULL;
|
|
|
|
|
} else {
|
|
|
|
|
/* The old sequence that would have been generated is,
|
|
|
|
|
* basically, bool_result == false. This is equivalent to
|
|
|
|
|
* !bool_result, so negate the old modifier.
|
|
|
|
|
*/
|
|
|
|
|
cmp->conditional_mod = brw_negate_cmod(cmp->conditional_mod);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (cmp == NULL) {
|
|
|
|
|
cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
|
|
|
|
|
brw_imm_d(0), BRW_CONDITIONAL_Z);
|
|
|
|
|
}
|
2014-08-19 15:22:43 -07:00
|
|
|
} else {
|
|
|
|
|
fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
|
|
|
|
|
BRW_REGISTER_TYPE_UW));
|
2015-06-03 21:01:32 +03:00
|
|
|
cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
|
2014-08-19 15:22:43 -07:00
|
|
|
}
|
intel/fs: Improve discard_if code generation
Previously we would blindly emit a sequence like:
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.z.f0.1(16) null<1>D g7<8,8,1>D 0D
The first move sets the flags based on the initial execution mask.
Later discard sequences contain a predicated compare that can only
remove more SIMD channels. Often times the only user of the result from
the first compare is the second compare. Instead, generate a sequence
like
mov(1) f0.1<1>UW g1.14<0,1,0>UW
...
cmp.l.f0(16) g7<1>F g5<8,8,1>F 0x41700000F /* 15F */
(+f0.1) cmp.ge.f0.1(8) null<1>F g5<8,8,1>F 0x41700000F /* 15F */
If the results stored in g7 and f0.0 are not used, the comparison will
be eliminated. This removes an instruction and potentially reduces
register pressure.
v2: Major re-write of the commit message (including fixing the assembly
code). Suggested by Matt.
All Gen8+ platforms had similar results. (Ice Lake shown)
total instructions in shared programs: 17224434 -> 17198659 (-0.15%)
instructions in affected programs: 2908125 -> 2882350 (-0.89%)
helped: 18891
HURT: 5
helped stats (abs) min: 1 max: 12 x̄: 1.38 x̃: 1
helped stats (rel) min: 0.03% max: 25.00% x̄: 1.76% x̃: 1.02%
HURT stats (abs) min: 9 max: 105 x̄: 51.40 x̃: 35
HURT stats (rel) min: 0.43% max: 4.92% x̄: 2.34% x̃: 1.56%
95% mean confidence interval for instructions value: -1.39 -1.34
95% mean confidence interval for instructions %-change: -1.79% -1.73%
Instructions are helped.
total cycles in shared programs: 361468458 -> 361170679 (-0.08%)
cycles in affected programs: 38470116 -> 38172337 (-0.77%)
helped: 16202
HURT: 1456
helped stats (abs) min: 1 max: 4473 x̄: 26.24 x̃: 18
helped stats (rel) min: <.01% max: 28.44% x̄: 2.90% x̃: 2.18%
HURT stats (abs) min: 1 max: 5982 x̄: 87.51 x̃: 28
HURT stats (rel) min: <.01% max: 51.29% x̄: 5.48% x̃: 1.64%
95% mean confidence interval for cycles value: -18.24 -15.49
95% mean confidence interval for cycles %-change: -2.26% -2.14%
Cycles are helped.
total spills in shared programs: 12147 -> 12176 (0.24%)
spills in affected programs: 175 -> 204 (16.57%)
helped: 8
HURT: 5
total fills in shared programs: 25262 -> 25292 (0.12%)
fills in affected programs: 269 -> 299 (11.15%)
helped: 8
HURT: 5
Haswell
total instructions in shared programs: 13530316 -> 13502647 (-0.20%)
instructions in affected programs: 2507824 -> 2480155 (-1.10%)
helped: 18859
HURT: 10
helped stats (abs) min: 1 max: 12 x̄: 1.48 x̃: 1
helped stats (rel) min: 0.03% max: 27.78% x̄: 2.38% x̃: 1.41%
HURT stats (abs) min: 5 max: 39 x̄: 25.70 x̃: 31
HURT stats (rel) min: 0.22% max: 1.66% x̄: 1.09% x̃: 1.31%
95% mean confidence interval for instructions value: -1.49 -1.44
95% mean confidence interval for instructions %-change: -2.42% -2.34%
Instructions are helped.
total cycles in shared programs: 377865412 -> 377639034 (-0.06%)
cycles in affected programs: 40169572 -> 39943194 (-0.56%)
helped: 15550
HURT: 1938
helped stats (abs) min: 1 max: 2482 x̄: 25.67 x̃: 18
helped stats (rel) min: <.01% max: 37.77% x̄: 3.00% x̃: 2.25%
HURT stats (abs) min: 1 max: 4862 x̄: 89.17 x̃: 35
HURT stats (rel) min: <.01% max: 67.67% x̄: 6.16% x̃: 2.75%
95% mean confidence interval for cycles value: -14.42 -11.47
95% mean confidence interval for cycles %-change: -2.05% -1.91%
Cycles are helped.
total spills in shared programs: 26769 -> 26814 (0.17%)
spills in affected programs: 826 -> 871 (5.45%)
helped: 9
HURT: 10
total fills in shared programs: 38383 -> 38425 (0.11%)
fills in affected programs: 834 -> 876 (5.04%)
helped: 9
HURT: 10
LOST: 5
GAINED: 10
Ivy Bridge
total instructions in shared programs: 12079250 -> 12044139 (-0.29%)
instructions in affected programs: 2409680 -> 2374569 (-1.46%)
helped: 16135
HURT: 0
helped stats (abs) min: 1 max: 23 x̄: 2.18 x̃: 2
helped stats (rel) min: 0.07% max: 37.50% x̄: 2.72% x̃: 1.68%
95% mean confidence interval for instructions value: -2.21 -2.14
95% mean confidence interval for instructions %-change: -2.76% -2.67%
Instructions are helped.
total cycles in shared programs: 180116747 -> 179900405 (-0.12%)
cycles in affected programs: 25439823 -> 25223481 (-0.85%)
helped: 13817
HURT: 1499
helped stats (abs) min: 1 max: 1886 x̄: 26.40 x̃: 18
helped stats (rel) min: <.01% max: 38.84% x̄: 2.57% x̃: 1.97%
HURT stats (abs) min: 1 max: 3684 x̄: 98.99 x̃: 52
HURT stats (rel) min: <.01% max: 97.01% x̄: 6.37% x̃: 3.42%
95% mean confidence interval for cycles value: -15.68 -12.57
95% mean confidence interval for cycles %-change: -1.77% -1.63%
Cycles are helped.
LOST: 8
GAINED: 10
Sandy Bridge
total instructions in shared programs: 10878990 -> 10863659 (-0.14%)
instructions in affected programs: 1806702 -> 1791371 (-0.85%)
helped: 13023
HURT: 0
helped stats (abs) min: 1 max: 5 x̄: 1.18 x̃: 1
helped stats (rel) min: 0.07% max: 13.79% x̄: 1.65% x̃: 1.10%
95% mean confidence interval for instructions value: -1.18 -1.17
95% mean confidence interval for instructions %-change: -1.68% -1.62%
Instructions are helped.
total cycles in shared programs: 154082878 -> 153862810 (-0.14%)
cycles in affected programs: 20199374 -> 19979306 (-1.09%)
helped: 12048
HURT: 510
helped stats (abs) min: 1 max: 323 x̄: 20.57 x̃: 18
helped stats (rel) min: 0.03% max: 17.78% x̄: 2.05% x̃: 1.52%
HURT stats (abs) min: 1 max: 448 x̄: 54.39 x̃: 16
HURT stats (rel) min: 0.02% max: 37.98% x̄: 4.13% x̃: 1.17%
95% mean confidence interval for cycles value: -17.97 -17.08
95% mean confidence interval for cycles %-change: -1.84% -1.75%
Cycles are helped.
LOST: 1
GAINED: 0
Iron Lake
total instructions in shared programs: 8155075 -> 8142729 (-0.15%)
instructions in affected programs: 949495 -> 937149 (-1.30%)
helped: 5810
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.12 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.53% x̃: 1.85%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.59% -2.48%
Instructions are helped.
total cycles in shared programs: 188584610 -> 188549632 (-0.02%)
cycles in affected programs: 17274446 -> 17239468 (-0.20%)
helped: 3881
HURT: 90
helped stats (abs) min: 2 max: 168 x̄: 9.08 x̃: 6
helped stats (rel) min: <.01% max: 23.53% x̄: 0.83% x̃: 0.30%
HURT stats (abs) min: 2 max: 10 x̄: 2.80 x̃: 2
HURT stats (rel) min: <.01% max: 0.60% x̄: 0.10% x̃: 0.07%
95% mean confidence interval for cycles value: -9.35 -8.27
95% mean confidence interval for cycles %-change: -0.85% -0.77%
Cycles are helped.
GM45
total instructions in shared programs: 5019308 -> 5013119 (-0.12%)
instructions in affected programs: 489028 -> 482839 (-1.27%)
helped: 2912
HURT: 0
helped stats (abs) min: 1 max: 8 x̄: 2.13 x̃: 2
helped stats (rel) min: 0.10% max: 16.67% x̄: 2.46% x̃: 1.81%
95% mean confidence interval for instructions value: -2.14 -2.11
95% mean confidence interval for instructions %-change: -2.54% -2.39%
Instructions are helped.
total cycles in shared programs: 129002592 -> 128977804 (-0.02%)
cycles in affected programs: 12669152 -> 12644364 (-0.20%)
helped: 2759
HURT: 37
helped stats (abs) min: 2 max: 168 x̄: 9.03 x̃: 4
helped stats (rel) min: <.01% max: 21.43% x̄: 0.75% x̃: 0.31%
HURT stats (abs) min: 2 max: 10 x̄: 3.62 x̃: 4
HURT stats (rel) min: <.01% max: 0.41% x̄: 0.10% x̃: 0.04%
95% mean confidence interval for cycles value: -9.53 -8.20
95% mean confidence interval for cycles %-change: -0.79% -0.70%
Cycles are helped.
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
2019-05-20 17:25:01 -07:00
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
cmp->predicate = BRW_PREDICATE_NORMAL;
|
2020-01-04 14:32:09 -08:00
|
|
|
cmp->flag_subreg = sample_mask_flag_subreg(this);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2020-04-25 14:59:30 -05:00
|
|
|
emit_discard_jump();
|
2017-01-05 17:51:51 -08:00
|
|
|
|
2020-01-04 16:16:24 -08:00
|
|
|
if (devinfo->gen < 7)
|
|
|
|
|
limit_dispatch_width(
|
|
|
|
|
16, "Fragment discard/demote not implemented in SIMD32 mode.\n");
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
case nir_intrinsic_load_input: {
|
|
|
|
|
/* load_input is only used for flat inputs */
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) == 32);
|
2016-07-12 03:57:25 -07:00
|
|
|
unsigned base = nir_intrinsic_base(instr);
|
2018-05-18 15:13:25 -07:00
|
|
|
unsigned comp = nir_intrinsic_component(instr);
|
2016-07-12 03:57:25 -07:00
|
|
|
unsigned num_components = instr->num_components;
|
2015-05-05 20:52:58 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
/* Special case fields in the VUE header */
|
|
|
|
|
if (base == VARYING_SLOT_LAYER)
|
2018-05-18 15:13:25 -07:00
|
|
|
comp = 1;
|
2016-07-12 03:57:25 -07:00
|
|
|
else if (base == VARYING_SLOT_VIEWPORT)
|
2018-05-18 15:13:25 -07:00
|
|
|
comp = 2;
|
2015-05-05 20:52:58 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
for (unsigned int i = 0; i < num_components; i++) {
|
2019-07-19 17:38:04 -05:00
|
|
|
bld.MOV(offset(dest, bld, i),
|
|
|
|
|
retype(component(interp_reg(base, comp + i), 3), dest.type));
|
2016-07-12 03:57:25 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-11 14:55:40 -05:00
|
|
|
case nir_intrinsic_load_fs_input_interp_deltas: {
|
|
|
|
|
assert(stage == MESA_SHADER_FRAGMENT);
|
|
|
|
|
assert(nir_src_as_uint(instr->src[0]) == 0);
|
|
|
|
|
fs_reg interp = interp_reg(nir_intrinsic_base(instr),
|
|
|
|
|
nir_intrinsic_component(instr));
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_F;
|
|
|
|
|
bld.MOV(offset(dest, bld, 0), component(interp, 3));
|
|
|
|
|
bld.MOV(offset(dest, bld, 1), component(interp, 1));
|
|
|
|
|
bld.MOV(offset(dest, bld, 2), component(interp, 0));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
case nir_intrinsic_load_barycentric_pixel:
|
|
|
|
|
case nir_intrinsic_load_barycentric_centroid:
|
2019-04-11 14:12:58 -05:00
|
|
|
case nir_intrinsic_load_barycentric_sample: {
|
|
|
|
|
/* Use the delta_xy values computed from the payload */
|
|
|
|
|
const glsl_interp_mode interp_mode =
|
|
|
|
|
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
|
|
|
|
|
enum brw_barycentric_mode bary =
|
|
|
|
|
brw_barycentric_mode(interp_mode, instr->intrinsic);
|
2020-01-03 17:08:51 -08:00
|
|
|
const fs_reg srcs[] = { offset(this->delta_xy[bary], bld, 0),
|
|
|
|
|
offset(this->delta_xy[bary], bld, 1) };
|
|
|
|
|
bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
|
2016-07-12 03:57:25 -07:00
|
|
|
break;
|
2019-04-11 14:12:58 -05:00
|
|
|
}
|
2016-07-12 03:57:25 -07:00
|
|
|
|
|
|
|
|
case nir_intrinsic_load_barycentric_at_sample: {
|
|
|
|
|
const glsl_interp_mode interpolation =
|
|
|
|
|
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
|
2015-07-20 17:38:15 +03:00
|
|
|
|
2018-10-20 09:55:28 -05:00
|
|
|
if (nir_src_is_const(instr->src[0])) {
|
|
|
|
|
unsigned msg_data = nir_src_as_uint(instr->src[0]) << 4;
|
2016-07-12 03:57:25 -07:00
|
|
|
|
|
|
|
|
emit_pixel_interpolater_send(bld,
|
|
|
|
|
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
|
2020-01-03 16:12:23 -08:00
|
|
|
dest,
|
2016-07-12 03:57:25 -07:00
|
|
|
fs_reg(), /* src */
|
|
|
|
|
brw_imm_ud(msg_data),
|
|
|
|
|
interpolation);
|
|
|
|
|
} else {
|
|
|
|
|
const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
|
|
|
|
|
BRW_REGISTER_TYPE_UD);
|
|
|
|
|
|
|
|
|
|
if (nir_src_is_dynamically_uniform(instr->src[0])) {
|
|
|
|
|
const fs_reg sample_id = bld.emit_uniformize(sample_src);
|
|
|
|
|
const fs_reg msg_data = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.exec_all().group(1, 0)
|
|
|
|
|
.SHL(msg_data, sample_id, brw_imm_ud(4u));
|
2015-11-04 23:05:07 -08:00
|
|
|
emit_pixel_interpolater_send(bld,
|
|
|
|
|
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
|
2020-01-03 16:12:23 -08:00
|
|
|
dest,
|
2015-11-04 23:05:07 -08:00
|
|
|
fs_reg(), /* src */
|
2019-12-27 17:06:30 -08:00
|
|
|
component(msg_data, 0),
|
2015-11-04 23:05:07 -08:00
|
|
|
interpolation);
|
|
|
|
|
} else {
|
2016-07-12 03:57:25 -07:00
|
|
|
/* Make a loop that sends a message to the pixel interpolater
|
|
|
|
|
* for the sample number in each live channel. If there are
|
|
|
|
|
* multiple channels with the same sample number then these
|
|
|
|
|
* will be handled simultaneously with a single iteration of
|
|
|
|
|
* the loop.
|
|
|
|
|
*/
|
|
|
|
|
bld.emit(BRW_OPCODE_DO);
|
|
|
|
|
|
|
|
|
|
/* Get the next live sample number into sample_id_reg */
|
|
|
|
|
const fs_reg sample_id = bld.emit_uniformize(sample_src);
|
|
|
|
|
|
|
|
|
|
/* Set the flag register so that we can perform the send
|
|
|
|
|
* message on all channels that have the same sample number
|
|
|
|
|
*/
|
|
|
|
|
bld.CMP(bld.null_reg_ud(),
|
|
|
|
|
sample_src, sample_id,
|
|
|
|
|
BRW_CONDITIONAL_EQ);
|
|
|
|
|
const fs_reg msg_data = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.exec_all().group(1, 0)
|
|
|
|
|
.SHL(msg_data, sample_id, brw_imm_ud(4u));
|
|
|
|
|
fs_inst *inst =
|
2015-11-04 23:05:07 -08:00
|
|
|
emit_pixel_interpolater_send(bld,
|
|
|
|
|
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
|
2020-01-03 16:12:23 -08:00
|
|
|
dest,
|
2015-11-04 23:05:07 -08:00
|
|
|
fs_reg(), /* src */
|
2016-04-25 18:06:13 -07:00
|
|
|
component(msg_data, 0),
|
2015-11-04 23:05:07 -08:00
|
|
|
interpolation);
|
2016-07-12 03:57:25 -07:00
|
|
|
set_predicate(BRW_PREDICATE_NORMAL, inst);
|
2015-07-27 16:26:52 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
/* Continue the loop if there are any live channels left */
|
|
|
|
|
set_predicate_inv(BRW_PREDICATE_NORMAL,
|
|
|
|
|
true, /* inverse */
|
|
|
|
|
bld.emit(BRW_OPCODE_WHILE));
|
2015-11-04 23:05:07 -08:00
|
|
|
}
|
|
|
|
|
}
|
2016-07-12 03:57:25 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2015-07-27 16:26:52 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
case nir_intrinsic_load_barycentric_at_offset: {
|
|
|
|
|
const glsl_interp_mode interpolation =
|
|
|
|
|
(enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
|
2015-07-27 16:26:52 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
|
2015-07-27 16:26:52 +03:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
if (const_offset) {
|
2018-10-20 09:55:28 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) == 32);
|
2019-03-27 00:59:03 +01:00
|
|
|
unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf;
|
|
|
|
|
unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf;
|
2015-11-04 23:05:07 -08:00
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
emit_pixel_interpolater_send(bld,
|
|
|
|
|
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
|
2020-01-03 16:12:23 -08:00
|
|
|
dest,
|
2016-07-12 03:57:25 -07:00
|
|
|
fs_reg(), /* src */
|
|
|
|
|
brw_imm_ud(off_x | (off_y << 4)),
|
|
|
|
|
interpolation);
|
|
|
|
|
} else {
|
|
|
|
|
fs_reg src = vgrf(glsl_type::ivec2_type);
|
|
|
|
|
fs_reg offset_src = retype(get_nir_src(instr->src[0]),
|
|
|
|
|
BRW_REGISTER_TYPE_F);
|
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
|
fs_reg temp = vgrf(glsl_type::float_type);
|
|
|
|
|
bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
|
|
|
|
|
fs_reg itemp = vgrf(glsl_type::int_type);
|
|
|
|
|
/* float to int */
|
|
|
|
|
bld.MOV(itemp, temp);
|
|
|
|
|
|
|
|
|
|
/* Clamp the upper end of the range to +7/16.
|
|
|
|
|
* ARB_gpu_shader5 requires that we support a maximum offset
|
|
|
|
|
* of +0.5, which isn't representable in a S0.4 value -- if
|
|
|
|
|
* we didn't clamp it, we'd end up with -8/16, which is the
|
|
|
|
|
* opposite of what the shader author wanted.
|
|
|
|
|
*
|
|
|
|
|
* This is legal due to ARB_gpu_shader5's quantization
|
|
|
|
|
* rules:
|
|
|
|
|
*
|
|
|
|
|
* "Not all values of <offset> may be supported; x and y
|
|
|
|
|
* offsets may be rounded to fixed-point values with the
|
|
|
|
|
* number of fraction bits given by the
|
|
|
|
|
* implementation-dependent constant
|
|
|
|
|
* FRAGMENT_INTERPOLATION_OFFSET_BITS"
|
|
|
|
|
*/
|
|
|
|
|
set_condmod(BRW_CONDITIONAL_L,
|
|
|
|
|
bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
|
2015-11-04 23:05:07 -08:00
|
|
|
}
|
2016-07-12 03:57:25 -07:00
|
|
|
|
|
|
|
|
const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
|
|
|
|
|
emit_pixel_interpolater_send(bld,
|
|
|
|
|
opcode,
|
2020-01-03 16:12:23 -08:00
|
|
|
dest,
|
2016-07-12 03:57:25 -07:00
|
|
|
src,
|
|
|
|
|
brw_imm_ud(0u),
|
|
|
|
|
interpolation);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-18 09:59:44 -05:00
|
|
|
case nir_intrinsic_load_frag_coord:
|
|
|
|
|
emit_fragcoord_interpolation(dest);
|
|
|
|
|
break;
|
2015-11-04 23:05:07 -08:00
|
|
|
|
2019-07-18 09:59:44 -05:00
|
|
|
case nir_intrinsic_load_interpolated_input: {
|
2016-07-12 03:57:25 -07:00
|
|
|
assert(instr->src[0].ssa &&
|
|
|
|
|
instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
|
|
|
|
|
nir_intrinsic_instr *bary_intrinsic =
|
|
|
|
|
nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
|
|
|
|
|
nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
|
|
|
|
|
enum glsl_interp_mode interp_mode =
|
|
|
|
|
(enum glsl_interp_mode) nir_intrinsic_interp_mode(bary_intrinsic);
|
|
|
|
|
fs_reg dst_xy;
|
|
|
|
|
|
|
|
|
|
if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
|
|
|
|
|
bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
|
2020-01-03 17:08:51 -08:00
|
|
|
/* Use the result of the PI message. */
|
|
|
|
|
dst_xy = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_F);
|
2016-07-12 03:57:25 -07:00
|
|
|
} else {
|
|
|
|
|
/* Use the delta_xy values computed from the payload */
|
|
|
|
|
enum brw_barycentric_mode bary =
|
|
|
|
|
brw_barycentric_mode(interp_mode, bary_intrin);
|
|
|
|
|
dst_xy = this->delta_xy[bary];
|
2015-11-04 23:05:07 -08:00
|
|
|
}
|
|
|
|
|
|
2016-07-12 03:57:25 -07:00
|
|
|
for (unsigned int i = 0; i < instr->num_components; i++) {
|
|
|
|
|
fs_reg interp =
|
2019-09-01 22:12:07 -05:00
|
|
|
component(interp_reg(nir_intrinsic_base(instr),
|
|
|
|
|
nir_intrinsic_component(instr) + i), 0);
|
2016-07-12 03:57:25 -07:00
|
|
|
interp.type = BRW_REGISTER_TYPE_F;
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_F;
|
2015-11-04 23:05:07 -08:00
|
|
|
|
2016-08-01 20:59:08 -07:00
|
|
|
if (devinfo->gen < 6 && interp_mode == INTERP_MODE_SMOOTH) {
|
2016-07-12 03:57:25 -07:00
|
|
|
fs_reg tmp = vgrf(glsl_type::float_type);
|
|
|
|
|
bld.emit(FS_OPCODE_LINTERP, tmp, dst_xy, interp);
|
|
|
|
|
bld.MUL(offset(dest, bld, i), tmp, this->pixel_w);
|
|
|
|
|
} else {
|
|
|
|
|
bld.emit(FS_OPCODE_LINTERP, offset(dest, bld, i), dst_xy, interp);
|
|
|
|
|
}
|
2015-11-04 23:05:07 -08:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
2016-07-12 03:57:25 -07:00
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
default:
|
|
|
|
|
nir_emit_intrinsic(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
|
|
|
|
|
nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
2019-02-03 19:46:16 -06:00
|
|
|
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
|
2016-09-08 23:48:51 -07:00
|
|
|
struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
|
2015-11-04 23:05:07 -08:00
|
|
|
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
2020-01-07 14:54:26 -06:00
|
|
|
case nir_intrinsic_control_barrier:
|
2020-01-14 12:03:22 -08:00
|
|
|
/* The whole workgroup fits in a single HW thread, so all the
|
|
|
|
|
* invocations are already executed lock-step. Instead of an actual
|
|
|
|
|
* barrier just emit a scheduling fence, that will generate no code.
|
|
|
|
|
*/
|
2020-04-28 21:04:04 -07:00
|
|
|
if (!nir->info.cs.local_size_variable &&
|
2018-11-12 06:29:51 -08:00
|
|
|
workgroup_size() <= dispatch_width) {
|
2020-01-14 12:03:22 -08:00
|
|
|
bld.exec_all().group(1, 0).emit(FS_OPCODE_SCHEDULING_FENCE);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
emit_barrier();
|
|
|
|
|
cs_prog_data->uses_barrier = true;
|
|
|
|
|
break;
|
|
|
|
|
|
2017-08-24 11:40:31 -07:00
|
|
|
case nir_intrinsic_load_subgroup_id:
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), subgroup_id);
|
2017-09-29 17:57:32 -07:00
|
|
|
break;
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
case nir_intrinsic_load_local_invocation_id:
|
|
|
|
|
case nir_intrinsic_load_work_group_id: {
|
|
|
|
|
gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
|
|
|
|
|
fs_reg val = nir_system_values[sv];
|
|
|
|
|
assert(val.file != BAD_FILE);
|
|
|
|
|
dest.type = val.type;
|
|
|
|
|
for (unsigned i = 0; i < 3; i++)
|
|
|
|
|
bld.MOV(offset(dest, bld, i), offset(val, bld, i));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_num_work_groups: {
|
|
|
|
|
const unsigned surface =
|
|
|
|
|
cs_prog_data->binding_table.work_groups_start;
|
|
|
|
|
|
|
|
|
|
cs_prog_data->uses_num_work_groups = true;
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1); /* num components */
|
2015-11-04 23:05:07 -08:00
|
|
|
|
|
|
|
|
/* Read the 3 GLuint components of gl_NumWorkGroups */
|
|
|
|
|
for (unsigned i = 0; i < 3; i++) {
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = brw_imm_ud(i << 2);
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
|
|
|
|
|
offset(dest, bld, i), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2015-11-04 23:05:07 -08:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-10 13:01:03 -07:00
|
|
|
case nir_intrinsic_shared_atomic_add:
|
|
|
|
|
case nir_intrinsic_shared_atomic_imin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_umin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_imax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_umax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_and:
|
|
|
|
|
case nir_intrinsic_shared_atomic_or:
|
|
|
|
|
case nir_intrinsic_shared_atomic_xor:
|
|
|
|
|
case nir_intrinsic_shared_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_shared_atomic_comp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_shared_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2015-10-10 13:01:03 -07:00
|
|
|
break;
|
2018-04-18 14:02:33 -07:00
|
|
|
case nir_intrinsic_shared_atomic_fmin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_fmax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_fcomp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_shared_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2018-04-18 14:02:33 -07:00
|
|
|
break;
|
2015-10-10 13:01:03 -07:00
|
|
|
|
2016-03-25 11:19:53 -07:00
|
|
|
case nir_intrinsic_load_shared: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
2019-02-03 19:46:16 -06:00
|
|
|
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
|
2016-03-25 11:19:53 -07:00
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
const unsigned bit_size = nir_dest_bit_size(instr->dest);
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[0]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
2016-03-25 11:19:53 -07:00
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
/* Make dest unsigned because that's what the temporary will be */
|
|
|
|
|
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
2016-03-25 11:19:53 -07:00
|
|
|
|
|
|
|
|
/* Read the vector */
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) <= 32);
|
|
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_dest_bit_size(instr->dest) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) <= 4);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
fs_inst *inst =
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
inst->size_written = instr->num_components * dispatch_width * 4;
|
2018-11-12 18:48:10 -06:00
|
|
|
} else {
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) == 1);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
|
|
|
|
|
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
|
|
|
|
|
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2019-07-13 18:35:20 -05:00
|
|
|
bld.MOV(dest, subscript(read_result, dest.type, 0));
|
2018-11-12 18:48:10 -06:00
|
|
|
}
|
2016-03-25 11:19:53 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_store_shared: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
2019-02-03 19:46:16 -06:00
|
|
|
assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
|
2016-03-25 11:19:53 -07:00
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
2016-03-25 11:19:53 -07:00
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg data = get_nir_src(instr->src[0]);
|
|
|
|
|
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
2016-01-26 10:30:39 +01:00
|
|
|
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) <= 32);
|
2018-11-12 18:48:10 -06:00
|
|
|
assert(nir_intrinsic_write_mask(instr) ==
|
2018-11-16 09:23:56 -06:00
|
|
|
(1u << instr->num_components) - 1);
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
2018-11-12 18:48:10 -06:00
|
|
|
assert(nir_src_num_components(instr->src[0]) <= 4);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-11-12 18:48:10 -06:00
|
|
|
} else {
|
|
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2016-03-25 11:19:53 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-12 06:29:51 -08:00
|
|
|
case nir_intrinsic_load_local_group_size: {
|
2020-04-28 09:47:45 -07:00
|
|
|
assert(compiler->lower_variable_group_size);
|
|
|
|
|
assert(nir->info.cs.local_size_variable);
|
2018-11-12 06:29:51 -08:00
|
|
|
for (unsigned i = 0; i < 3; i++) {
|
|
|
|
|
bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
group_size[i]);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
default:
|
|
|
|
|
nir_emit_intrinsic(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-31 22:12:48 -07:00
|
|
|
/**
 * Return the identity element of the NIR binop \p op as an immediate
 * register of type \p type (e.g. 0 for iadd, ~0 for iand, -inf for fmax).
 */
static fs_reg
brw_nir_reduction_op_identity(const fs_builder &bld,
                              nir_op op, brw_reg_type type)
{
   const unsigned bits = type_sz(type) * 8;
   const nir_const_value ident = nir_alu_binop_identity(op, bits);

   switch (bits) {
   case 8:
      /* There are no byte immediates on this hardware; widen the identity
       * to a word immediate of matching signedness.
       */
      if (type != BRW_REGISTER_TYPE_UB) {
         assert(type == BRW_REGISTER_TYPE_B);
         return brw_imm_w(ident.i8);
      }
      return brw_imm_uw(ident.u8);
   case 16:
      return retype(brw_imm_uw(ident.u16), type);
   case 32:
      return retype(brw_imm_ud(ident.u32), type);
   case 64:
      /* DF immediates need special handling on platforms without 64-bit
       * immediate support, hence setup_imm_df().
       */
      return type == BRW_REGISTER_TYPE_DF ?
             setup_imm_df(bld, ident.f64) :
             retype(brw_imm_u64(ident.u64), type);
   default:
      unreachable("Invalid type size");
   }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Map a NIR reduction binop to the hardware opcode that implements it.
 * min/max all map to SEL; the ordering is supplied separately by
 * brw_cond_mod_for_nir_reduction_op().
 */
static opcode
brw_op_for_nir_reduction_op(nir_op op)
{
   switch (op) {
   case nir_op_iadd:
   case nir_op_fadd:
      return BRW_OPCODE_ADD;
   case nir_op_imul:
   case nir_op_fmul:
      return BRW_OPCODE_MUL;
   case nir_op_imin:
   case nir_op_umin:
   case nir_op_fmin:
   case nir_op_imax:
   case nir_op_umax:
   case nir_op_fmax:
      return BRW_OPCODE_SEL;
   case nir_op_iand:
      return BRW_OPCODE_AND;
   case nir_op_ior:
      return BRW_OPCODE_OR;
   case nir_op_ixor:
      return BRW_OPCODE_XOR;
   default:
      unreachable("Invalid reduction operation");
   }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Map a NIR reduction binop to the conditional modifier its SEL needs:
 * L for the min family, GE for the max family, NONE for everything else
 * (ADD/MUL/AND/OR/XOR carry the semantics in the opcode itself).
 */
static brw_conditional_mod
brw_cond_mod_for_nir_reduction_op(nir_op op)
{
   switch (op) {
   case nir_op_imin:
   case nir_op_umin:
   case nir_op_fmin:
      return BRW_CONDITIONAL_L;
   case nir_op_imax:
   case nir_op_umax:
   case nir_op_fmax:
      return BRW_CONDITIONAL_GE;
   case nir_op_iadd:
   case nir_op_fadd:
   case nir_op_imul:
   case nir_op_fmul:
   case nir_op_iand:
   case nir_op_ior:
   case nir_op_ixor:
      return BRW_CONDITIONAL_NONE;
   default:
      unreachable("Invalid reduction operation");
   }
}
|
|
|
|
|
|
2018-08-16 16:23:10 -05:00
|
|
|
/**
 * Translate the image index of a NIR image intrinsic (src[0]) into a
 * uniformized binding-table surface index, applying the stage's
 * image_start offset when the table does not begin at zero.
 */
fs_reg
fs_visitor::get_nir_image_intrinsic_image(const brw::fs_builder &bld,
                                          nir_intrinsic_instr *instr)
{
   const fs_reg nir_image =
      retype(get_nir_src_imm(instr->src[0]), BRW_REGISTER_TYPE_UD);
   fs_reg surface = nir_image;

   if (stage_prog_data->binding_table.image_start > 0) {
      if (nir_image.file == BRW_IMMEDIATE_VALUE) {
         /* Constant index: fold the table offset at compile time. */
         surface =
            brw_imm_ud(nir_image.d + stage_prog_data->binding_table.image_start);
      } else {
         /* Dynamic index: add the table offset at run time. */
         surface = vgrf(glsl_type::uint_type);
         bld.ADD(surface, nir_image,
                 brw_imm_d(stage_prog_data->binding_table.image_start));
      }
   }

   return bld.emit_uniformize(surface);
}
|
|
|
|
|
|
2018-10-20 10:05:33 -05:00
|
|
|
/**
 * Translate the SSBO block index of a NIR intrinsic into a uniformized
 * binding-table surface index, offset by the stage's ssbo_start.
 */
fs_reg
fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
                                         nir_intrinsic_instr *instr)
{
   /* SSBO stores are weird in that their index is in src[1] */
   const unsigned src_idx =
      instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;

   fs_reg surface;
   if (nir_src_is_const(instr->src[src_idx])) {
      /* Constant block index: fold the table offset at compile time. */
      surface = brw_imm_ud(stage_prog_data->binding_table.ssbo_start +
                           nir_src_as_uint(instr->src[src_idx]));
   } else {
      /* Dynamic block index: add the table offset at run time. */
      surface = vgrf(glsl_type::uint_type);
      bld.ADD(surface, get_nir_src(instr->src[src_idx]),
              brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
   }

   return bld.emit_uniformize(surface);
}
|
|
|
|
|
|
2019-02-28 08:15:30 -06:00
|
|
|
/**
|
|
|
|
|
* The offsets we get from NIR act as if each SIMD channel has it's own blob
|
|
|
|
|
* of contiguous space. However, if we actually place each SIMD channel in
|
|
|
|
|
* it's own space, we end up with terrible cache performance because each SIMD
|
|
|
|
|
* channel accesses a different cache line even when they're all accessing the
|
|
|
|
|
* same byte offset. To deal with this problem, we swizzle the address using
|
|
|
|
|
* a simple algorithm which ensures that any time a SIMD message reads or
|
|
|
|
|
* writes the same address, it's all in the same cache line. We have to keep
|
|
|
|
|
* the bottom two bits fixed so that we can read/write up to a dword at a time
|
|
|
|
|
* and the individual element is contiguous. We do this by splitting the
|
|
|
|
|
* address as follows:
|
|
|
|
|
*
|
|
|
|
|
* 31 4-6 2 0
|
|
|
|
|
* +-------------------------------+------------+----------+
|
|
|
|
|
* | Hi address bits | chan index | addr low |
|
|
|
|
|
* +-------------------------------+------------+----------+
|
|
|
|
|
*
|
|
|
|
|
* In other words, the bottom two address bits stay, and the top 30 get
|
|
|
|
|
* shifted up so that we can stick the SIMD channel index in the middle. This
|
|
|
|
|
* way, we can access 8, 16, or 32-bit elements and, when accessing a 32-bit
|
|
|
|
|
* at the same logical offset, the scratch read/write instruction acts on
|
|
|
|
|
* continuous elements and we get good cache locality.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg
|
|
|
|
|
fs_visitor::swizzle_nir_scratch_addr(const brw::fs_builder &bld,
|
|
|
|
|
const fs_reg &nir_addr,
|
|
|
|
|
bool in_dwords)
|
|
|
|
|
{
|
|
|
|
|
const fs_reg &chan_index =
|
|
|
|
|
nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
|
|
|
|
|
const unsigned chan_index_bits = ffs(dispatch_width) - 1;
|
|
|
|
|
|
|
|
|
|
fs_reg addr = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
if (in_dwords) {
|
|
|
|
|
/* In this case, we know the address is aligned to a DWORD and we want
|
|
|
|
|
* the final address in DWORDs.
|
|
|
|
|
*/
|
|
|
|
|
bld.SHL(addr, nir_addr, brw_imm_ud(chan_index_bits - 2));
|
|
|
|
|
bld.OR(addr, addr, chan_index);
|
|
|
|
|
} else {
|
|
|
|
|
/* This case substantially more annoying because we have to pay
|
|
|
|
|
* attention to those pesky two bottom bits.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg addr_hi = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.AND(addr_hi, nir_addr, brw_imm_ud(~0x3u));
|
|
|
|
|
bld.SHL(addr_hi, addr_hi, brw_imm_ud(chan_index_bits));
|
|
|
|
|
fs_reg chan_addr = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.SHL(chan_addr, chan_index, brw_imm_ud(2));
|
|
|
|
|
bld.AND(addr, nir_addr, brw_imm_ud(0x3u));
|
|
|
|
|
bld.OR(addr, addr, addr_hi);
|
|
|
|
|
bld.OR(addr, addr, chan_addr);
|
|
|
|
|
}
|
|
|
|
|
return addr;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-04 23:05:07 -08:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
2018-08-16 16:23:10 -05:00
|
|
|
case nir_intrinsic_image_load:
|
|
|
|
|
case nir_intrinsic_image_store:
|
|
|
|
|
case nir_intrinsic_image_atomic_add:
|
2019-08-20 22:32:50 -05:00
|
|
|
case nir_intrinsic_image_atomic_imin:
|
|
|
|
|
case nir_intrinsic_image_atomic_umin:
|
|
|
|
|
case nir_intrinsic_image_atomic_imax:
|
|
|
|
|
case nir_intrinsic_image_atomic_umax:
|
2018-08-16 16:23:10 -05:00
|
|
|
case nir_intrinsic_image_atomic_and:
|
|
|
|
|
case nir_intrinsic_image_atomic_or:
|
|
|
|
|
case nir_intrinsic_image_atomic_xor:
|
|
|
|
|
case nir_intrinsic_image_atomic_exchange:
|
2019-02-12 00:47:54 -06:00
|
|
|
case nir_intrinsic_image_atomic_comp_swap:
|
|
|
|
|
case nir_intrinsic_bindless_image_load:
|
|
|
|
|
case nir_intrinsic_bindless_image_store:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_add:
|
2019-08-20 22:32:50 -05:00
|
|
|
case nir_intrinsic_bindless_image_atomic_imin:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_umin:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_imax:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_umax:
|
2019-02-12 00:47:54 -06:00
|
|
|
case nir_intrinsic_bindless_image_atomic_and:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_or:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_xor:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_comp_swap: {
|
2015-11-04 23:05:07 -08:00
|
|
|
/* Get some metadata from the image intrinsic. */
|
|
|
|
|
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
2019-02-12 00:47:54 -06:00
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
|
|
|
|
case nir_intrinsic_image_load:
|
|
|
|
|
case nir_intrinsic_image_store:
|
|
|
|
|
case nir_intrinsic_image_atomic_add:
|
2019-08-20 22:32:50 -05:00
|
|
|
case nir_intrinsic_image_atomic_imin:
|
|
|
|
|
case nir_intrinsic_image_atomic_umin:
|
|
|
|
|
case nir_intrinsic_image_atomic_imax:
|
|
|
|
|
case nir_intrinsic_image_atomic_umax:
|
2019-02-12 00:47:54 -06:00
|
|
|
case nir_intrinsic_image_atomic_and:
|
|
|
|
|
case nir_intrinsic_image_atomic_or:
|
|
|
|
|
case nir_intrinsic_image_atomic_xor:
|
|
|
|
|
case nir_intrinsic_image_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_image_atomic_comp_swap:
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
get_nir_image_intrinsic_image(bld, instr);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
/* Bindless */
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE_HANDLE] =
|
|
|
|
|
bld.emit_uniformize(get_nir_src(instr->src[0]));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] =
|
2020-02-05 15:46:40 -08:00
|
|
|
brw_imm_ud(nir_image_intrinsic_coord_components(instr));
|
2015-11-04 23:05:07 -08:00
|
|
|
|
|
|
|
|
/* Emit an image load, store or atomic op. */
|
2019-02-12 00:47:54 -06:00
|
|
|
if (instr->intrinsic == nir_intrinsic_image_load ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_bindless_image_load) {
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
fs_inst *inst =
|
|
|
|
|
bld.emit(SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
inst->size_written = instr->num_components * dispatch_width * 4;
|
2019-02-12 00:47:54 -06:00
|
|
|
} else if (instr->intrinsic == nir_intrinsic_image_store ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_bindless_image_store) {
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[3]);
|
|
|
|
|
bld.emit(SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-08-24 17:23:26 -07:00
|
|
|
} else {
|
2018-08-24 17:41:01 -07:00
|
|
|
unsigned num_srcs = info->num_srcs;
|
2019-08-20 23:10:50 -05:00
|
|
|
int op = brw_aop_for_nir_intrinsic(instr);
|
|
|
|
|
if (op == BRW_AOP_INC || op == BRW_AOP_DEC) {
|
2018-08-24 17:41:01 -07:00
|
|
|
assert(num_srcs == 4);
|
2019-08-20 23:10:50 -05:00
|
|
|
num_srcs = 3;
|
2018-08-24 17:23:26 -07:00
|
|
|
}
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
|
2015-11-04 23:05:07 -08:00
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg data;
|
|
|
|
|
if (num_srcs >= 4)
|
|
|
|
|
data = get_nir_src(instr->src[3]);
|
|
|
|
|
if (num_srcs >= 5) {
|
|
|
|
|
fs_reg tmp = bld.vgrf(data.type, 2);
|
|
|
|
|
fs_reg sources[2] = { data, get_nir_src(instr->src[4]) };
|
|
|
|
|
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
|
|
|
|
|
data = tmp;
|
|
|
|
|
}
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
2015-07-27 16:26:52 +03:00
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
bld.emit(SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-03-27 16:27:20 -07:00
|
|
|
}
|
2015-07-27 16:26:52 +03:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-12 00:47:54 -06:00
|
|
|
case nir_intrinsic_image_size:
|
|
|
|
|
case nir_intrinsic_bindless_image_size: {
|
2018-08-16 11:01:24 -05:00
|
|
|
/* Unlike the [un]typed load and store opcodes, the TXS that this turns
|
|
|
|
|
* into will handle the binding table index for us in the geneerator.
|
2019-02-12 00:47:54 -06:00
|
|
|
* Incidentally, this means that we can handle bindless with exactly the
|
|
|
|
|
* same code.
|
2018-08-16 11:01:24 -05:00
|
|
|
*/
|
|
|
|
|
fs_reg image = retype(get_nir_src_imm(instr->src[0]),
|
|
|
|
|
BRW_REGISTER_TYPE_UD);
|
|
|
|
|
image = bld.emit_uniformize(image);
|
|
|
|
|
|
2020-08-19 18:21:33 -05:00
|
|
|
assert(nir_src_as_uint(instr->src[1]) == 0);
|
|
|
|
|
|
2018-10-31 09:52:33 -05:00
|
|
|
fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
|
2019-02-12 00:47:54 -06:00
|
|
|
if (instr->intrinsic == nir_intrinsic_image_size)
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE] = image;
|
|
|
|
|
else
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = image;
|
2018-10-31 09:52:33 -05:00
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
|
|
|
|
|
|
2018-08-16 11:01:24 -05:00
|
|
|
/* Since the image size is always uniform, we can just emit a SIMD8
|
|
|
|
|
* query instruction and splat the result out.
|
|
|
|
|
*/
|
|
|
|
|
const fs_builder ubld = bld.exec_all().group(8, 0);
|
|
|
|
|
|
|
|
|
|
fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
|
2018-10-31 09:52:33 -05:00
|
|
|
fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
|
|
|
|
|
tmp, srcs, ARRAY_SIZE(srcs));
|
2018-08-16 11:01:24 -05:00
|
|
|
inst->size_written = 4 * REG_SIZE;
|
|
|
|
|
|
|
|
|
|
for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
|
|
|
|
|
if (c == 2 && nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE) {
|
|
|
|
|
bld.emit(SHADER_OPCODE_INT_QUOTIENT,
|
|
|
|
|
offset(retype(dest, tmp.type), bld, c),
|
|
|
|
|
component(offset(tmp, ubld, c), 0), brw_imm_ud(6));
|
|
|
|
|
} else {
|
|
|
|
|
bld.MOV(offset(retype(dest, tmp.type), bld, c),
|
|
|
|
|
component(offset(tmp, ubld, c), 0));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-16 16:23:10 -05:00
|
|
|
case nir_intrinsic_image_load_raw_intel: {
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
get_nir_image_intrinsic_image(bld, instr);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
2018-01-27 13:19:57 -08:00
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_inst *inst =
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
inst->size_written = instr->num_components * dispatch_width * 4;
|
2018-01-27 13:19:57 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-16 16:23:10 -05:00
|
|
|
case nir_intrinsic_image_store_raw_intel: {
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
get_nir_image_intrinsic_image(bld, instr);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[2]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-01-27 13:19:57 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-05 09:13:20 +02:00
|
|
|
case nir_intrinsic_scoped_barrier:
|
|
|
|
|
assert(nir_intrinsic_execution_scope(instr) == NIR_SCOPE_NONE);
|
2020-06-05 16:04:13 -07:00
|
|
|
/* Fall through. */
|
2018-06-21 09:45:19 +02:00
|
|
|
case nir_intrinsic_group_memory_barrier:
|
|
|
|
|
case nir_intrinsic_memory_barrier_shared:
|
2015-10-10 13:00:04 -07:00
|
|
|
case nir_intrinsic_memory_barrier_buffer:
|
|
|
|
|
case nir_intrinsic_memory_barrier_image:
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompained by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separatedly
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
case nir_intrinsic_memory_barrier:
|
|
|
|
|
case nir_intrinsic_begin_invocation_interlock:
|
|
|
|
|
case nir_intrinsic_end_invocation_interlock: {
|
2019-07-10 12:02:23 -07:00
|
|
|
bool l3_fence, slm_fence;
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompained by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separatedly
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
const enum opcode opcode =
|
|
|
|
|
instr->intrinsic == nir_intrinsic_begin_invocation_interlock ?
|
|
|
|
|
SHADER_OPCODE_INTERLOCK : SHADER_OPCODE_MEMORY_FENCE;
|
|
|
|
|
|
|
|
|
|
switch (instr->intrinsic) {
|
2020-05-05 09:13:20 +02:00
|
|
|
case nir_intrinsic_scoped_barrier: {
|
2019-09-05 11:08:05 -07:00
|
|
|
nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
|
|
|
|
|
l3_fence = modes & (nir_var_shader_out |
|
|
|
|
|
nir_var_mem_ssbo |
|
|
|
|
|
nir_var_mem_global);
|
2019-12-31 01:01:27 -08:00
|
|
|
slm_fence = modes & nir_var_mem_shared;
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompained by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separatedly
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_begin_invocation_interlock:
|
|
|
|
|
case nir_intrinsic_end_invocation_interlock:
|
|
|
|
|
/* For beginInvocationInterlockARB(), we will generate a memory fence
|
|
|
|
|
* but with a different opcode so that generator can pick SENDC
|
|
|
|
|
* instead of SEND.
|
|
|
|
|
*
|
|
|
|
|
* For endInvocationInterlockARB(), we need to insert a memory fence which
|
|
|
|
|
* stalls in the shader until the memory transactions prior to that
|
|
|
|
|
* fence are complete. This ensures that the shader does not end before
|
|
|
|
|
* any writes from its critical section have landed. Otherwise, you can
|
|
|
|
|
* end up with a case where the next invocation on that pixel properly
|
|
|
|
|
* stalls for previous FS invocation on its pixel to complete but
|
|
|
|
|
* doesn't actually wait for the dataport memory transactions from that
|
|
|
|
|
* thread to land before submitting its own.
|
|
|
|
|
*
|
|
|
|
|
* Handling them here will allow the logic for IVB render cache (see
|
|
|
|
|
* below) to be reused.
|
|
|
|
|
*/
|
|
|
|
|
l3_fence = true;
|
|
|
|
|
slm_fence = false;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2019-12-31 01:01:27 -08:00
|
|
|
l3_fence = instr->intrinsic != nir_intrinsic_memory_barrier_shared;
|
|
|
|
|
slm_fence = instr->intrinsic == nir_intrinsic_group_memory_barrier ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_memory_barrier ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_memory_barrier_shared;
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompained by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separatedly
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
break;
|
2019-07-10 12:02:23 -07:00
|
|
|
}
|
|
|
|
|
|
2019-02-03 19:46:16 -06:00
|
|
|
if (stage != MESA_SHADER_COMPUTE && stage != MESA_SHADER_KERNEL)
|
2020-01-13 15:48:12 -08:00
|
|
|
slm_fence = false;
|
|
|
|
|
|
2019-12-31 01:01:27 -08:00
|
|
|
/* If the workgroup fits in a single HW thread, the messages for SLM are
|
|
|
|
|
* processed in-order and the shader itself is already synchronized so
|
|
|
|
|
* the memory fence is not necessary.
|
|
|
|
|
*
|
|
|
|
|
* TODO: Check if applies for many HW threads sharing same Data Port.
|
|
|
|
|
*/
|
2020-04-28 21:04:04 -07:00
|
|
|
if (!nir->info.cs.local_size_variable &&
|
2018-11-12 06:29:51 -08:00
|
|
|
slm_fence && workgroup_size() <= dispatch_width)
|
2019-12-31 01:01:27 -08:00
|
|
|
slm_fence = false;
|
|
|
|
|
|
|
|
|
|
/* Prior to Gen11, there's only L3 fence, so emit that instead. */
|
|
|
|
|
if (slm_fence && devinfo->gen < 11) {
|
|
|
|
|
slm_fence = false;
|
|
|
|
|
l3_fence = true;
|
|
|
|
|
}
|
|
|
|
|
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect it will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has no change on instruction count but reports
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others are able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
/* IVB does typed surface access through the render cache, so we need
|
|
|
|
|
* to flush it too.
|
|
|
|
|
*/
|
|
|
|
|
const bool needs_render_fence =
|
|
|
|
|
devinfo->gen == 7 && !devinfo->is_haswell;
|
|
|
|
|
|
2019-07-10 12:02:23 -07:00
|
|
|
/* Be conservative in Gen11+ and always stall in a fence. Since there
|
|
|
|
|
* are two different fences, and shader might want to synchronize
|
|
|
|
|
* between them.
|
|
|
|
|
*
|
2020-01-17 14:17:58 -08:00
|
|
|
* TODO: Use scope and visibility information for the barriers from NIR
|
|
|
|
|
* to make a better decision on whether we need to stall.
|
2019-07-10 12:02:23 -07:00
|
|
|
*/
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
const bool stall = devinfo->gen >= 11 || needs_render_fence ||
|
|
|
|
|
instr->intrinsic == nir_intrinsic_end_invocation_interlock;
|
|
|
|
|
|
|
|
|
|
const bool commit_enable = stall ||
|
|
|
|
|
devinfo->gen >= 10; /* HSD ES # 1404612949 */
|
2019-07-10 12:02:23 -07:00
|
|
|
|
2020-01-17 14:17:58 -08:00
|
|
|
unsigned fence_regs_count = 0;
|
|
|
|
|
fs_reg fence_regs[2] = {};
|
|
|
|
|
|
2016-04-25 17:30:54 -07:00
|
|
|
const fs_builder ubld = bld.group(8, 0);
|
2019-07-10 12:02:23 -07:00
|
|
|
|
|
|
|
|
if (l3_fence) {
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
fs_inst *fence =
|
|
|
|
|
ubld.emit(opcode,
|
|
|
|
|
ubld.vgrf(BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_vec8_grf(0, 0),
|
|
|
|
|
brw_imm_ud(commit_enable),
|
|
|
|
|
brw_imm_ud(/* bti */ 0));
|
|
|
|
|
fence->sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
|
|
|
|
|
|
2020-01-17 14:17:58 -08:00
|
|
|
fence_regs[fence_regs_count++] = fence->dst;
|
|
|
|
|
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
if (needs_render_fence) {
|
|
|
|
|
fs_inst *render_fence =
|
|
|
|
|
ubld.emit(opcode,
|
|
|
|
|
ubld.vgrf(BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_vec8_grf(0, 0),
|
|
|
|
|
brw_imm_ud(commit_enable),
|
|
|
|
|
brw_imm_ud(/* bti */ 0));
|
|
|
|
|
render_fence->sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
|
|
|
|
|
|
2020-01-17 14:17:58 -08:00
|
|
|
fence_regs[fence_regs_count++] = render_fence->dst;
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
}
|
2019-07-10 12:02:23 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (slm_fence) {
|
intel/fs,vec4: Pull stall logic for memory fences up into the IR
Instead of emitting the stall MOV "inside" the
SHADER_OPCODE_MEMORY_FENCE generation, use the scheduling fences when
creating the IR.
For IvyBridge, every (data cache) fence is accompanied by a render
cache fence, that now is explicit in the IR, two
SHADER_OPCODE_MEMORY_FENCEs are emitted (with different SFIDs).
Because Begin and End interlock intrinsics are effectively memory
barriers, move its handling alongside the other memory barrier
intrinsics. The SHADER_OPCODE_INTERLOCK is still used to distinguish
if we are going to use a SENDC (for Begin) or regular SEND (for End).
This change is a preparation to allow emitting both SENDs in Gen11+
before we can stall on them.
Shader-db results for IVB (i965):
total instructions in shared programs: 11971190 -> 11971200 (<.01%)
instructions in affected programs: 11482 -> 11492 (0.09%)
helped: 0
HURT: 8
HURT stats (abs) min: 1 max: 3 x̄: 1.25 x̃: 1
HURT stats (rel) min: 0.03% max: 0.50% x̄: 0.14% x̃: 0.10%
95% mean confidence interval for instructions value: 0.66 1.84
95% mean confidence interval for instructions %-change: 0.01% 0.27%
Instructions are HURT.
Unlike the previous code, that used the `mov g1 g2` trick to force
both `g1` and `g2` to stall, the scheduling fence will generate `mov
null g1` and `mov null g2`. During review it was decided it was not
worth keeping the special codepath for the small effect will have.
Shader-db results for HSW (i965), BDW and SKL don't have a change
on instruction count, but do report changes in cycles count, showing
SKL results below
total cycles in shared programs: 341738444 -> 341710570 (<.01%)
cycles in affected programs: 7240002 -> 7212128 (-0.38%)
helped: 46
HURT: 5
helped stats (abs) min: 14 max: 1940 x̄: 676.22 x̃: 154
helped stats (rel) min: <.01% max: 2.62% x̄: 1.28% x̃: 0.95%
HURT stats (abs) min: 2 max: 1768 x̄: 646.40 x̃: 362
HURT stats (rel) min: <.01% max: 0.83% x̄: 0.28% x̃: 0.08%
95% mean confidence interval for cycles value: -777.71 -315.38
95% mean confidence interval for cycles %-change: -1.42% -0.83%
Cycles are helped.
This seems to be the effect of allocating two registers separately
instead of a single one with size 2, which causes different register
allocation, affecting the cycle estimates.
while ICL also has not change on instruction count but report changes
negative changes in cycles
total cycles in shared programs: 352665369 -> 352707484 (0.01%)
cycles in affected programs: 9608288 -> 9650403 (0.44%)
helped: 4
HURT: 104
helped stats (abs) min: 24 max: 128 x̄: 88.50 x̃: 101
helped stats (rel) min: <.01% max: 0.85% x̄: 0.46% x̃: 0.49%
HURT stats (abs) min: 2 max: 2016 x̄: 408.36 x̃: 48
HURT stats (rel) min: <.01% max: 3.31% x̄: 0.88% x̃: 0.45%
95% mean confidence interval for cycles value: 256.67 523.24
95% mean confidence interval for cycles %-change: 0.63% 1.03%
Cycles are HURT.
AFAICT this is the result of the case above.
Shader-db results for TGL have similar cycles result as ICL, but also
affect instructions
total instructions in shared programs: 17690586 -> 17690597 (<.01%)
instructions in affected programs: 64617 -> 64628 (0.02%)
helped: 55
HURT: 32
helped stats (abs) min: 1 max: 16 x̄: 4.13 x̃: 3
helped stats (rel) min: 0.05% max: 2.78% x̄: 0.86% x̃: 0.74%
HURT stats (abs) min: 1 max: 65 x̄: 7.44 x̃: 2
HURT stats (rel) min: 0.05% max: 4.58% x̄: 1.13% x̃: 0.69%
95% mean confidence interval for instructions value: -2.03 2.28
95% mean confidence interval for instructions %-change: -0.41% 0.15%
Inconclusive result (value mean confidence interval includes 0).
Now that more is done in the IR, more dependencies are visible and
more SWSB annotations are emitted. Mixed with different register
allocation decisions like above, some shaders will see more `sync
nops` while others able to avoid them.
Most of the new `sync nops` are also redundant and could be dropped,
which will be fixed in a separate change.
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3278>
2020-01-17 15:07:44 -08:00
|
|
|
assert(opcode == SHADER_OPCODE_MEMORY_FENCE);
|
|
|
|
|
fs_inst *fence =
|
|
|
|
|
ubld.emit(opcode,
|
|
|
|
|
ubld.vgrf(BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_vec8_grf(0, 0),
|
|
|
|
|
brw_imm_ud(commit_enable),
|
|
|
|
|
brw_imm_ud(GEN7_BTI_SLM));
|
|
|
|
|
fence->sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
|
|
|
|
|
|
2020-01-17 14:17:58 -08:00
|
|
|
fence_regs[fence_regs_count++] = fence->dst;
|
2019-07-10 12:02:23 -07:00
|
|
|
}
|
|
|
|
|
|
2020-01-17 14:17:58 -08:00
|
|
|
assert(fence_regs_count <= 2);
|
|
|
|
|
|
|
|
|
|
if (stall || fence_regs_count == 0) {
|
|
|
|
|
ubld.exec_all().group(1, 0).emit(
|
|
|
|
|
FS_OPCODE_SCHEDULING_FENCE, ubld.null_reg_ud(),
|
|
|
|
|
fence_regs, fence_regs_count);
|
|
|
|
|
}
|
2019-12-31 01:01:27 -08:00
|
|
|
|
2015-07-27 16:25:55 +03:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-07 14:18:56 -06:00
|
|
|
case nir_intrinsic_memory_barrier_tcs_patch:
|
|
|
|
|
break;
|
|
|
|
|
|
2015-10-07 11:50:01 +01:00
|
|
|
case nir_intrinsic_shader_clock: {
|
|
|
|
|
/* We cannot do anything if there is an event, so ignore it for now */
|
2016-09-01 00:35:03 -07:00
|
|
|
const fs_reg shader_clock = get_timestamp(bld);
|
|
|
|
|
const fs_reg srcs[] = { component(shader_clock, 0),
|
|
|
|
|
component(shader_clock, 1) };
|
2015-10-07 11:50:01 +01:00
|
|
|
bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-16 16:23:10 -05:00
|
|
|
case nir_intrinsic_image_samples:
|
2015-08-27 23:34:47 -04:00
|
|
|
/* The driver does not support multi-sampled images. */
|
2015-11-02 11:26:16 -08:00
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
|
2015-08-27 23:34:47 -04:00
|
|
|
break;
|
|
|
|
|
|
2014-12-03 17:03:19 -08:00
|
|
|
case nir_intrinsic_load_uniform: {
|
2018-02-20 10:28:41 +01:00
|
|
|
/* Offsets are in bytes but they should always aligned to
|
|
|
|
|
* the type size
|
|
|
|
|
*/
|
|
|
|
|
assert(instr->const_index[0] % 4 == 0 ||
|
|
|
|
|
instr->const_index[0] % type_sz(dest.type) == 0);
|
2015-11-10 21:12:47 -08:00
|
|
|
|
2015-12-07 22:41:50 -08:00
|
|
|
fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
|
2015-03-18 15:18:54 -07:00
|
|
|
|
2018-10-20 09:55:28 -05:00
|
|
|
if (nir_src_is_const(instr->src[0])) {
|
|
|
|
|
unsigned load_offset = nir_src_as_uint(instr->src[0]);
|
|
|
|
|
assert(load_offset % type_sz(dest.type) == 0);
|
2018-02-20 10:28:41 +01:00
|
|
|
/* For 16-bit types we add the module of the const_index[0]
|
|
|
|
|
* offset to access to not 32-bit aligned element
|
|
|
|
|
*/
|
2018-10-20 09:55:28 -05:00
|
|
|
src.offset = load_offset + instr->const_index[0] % 4;
|
2015-11-24 15:12:20 -08:00
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < instr->num_components; j++) {
|
|
|
|
|
bld.MOV(offset(dest, bld, j), offset(src, bld, j));
|
|
|
|
|
}
|
2015-11-25 14:14:05 -08:00
|
|
|
} else {
|
2015-11-24 15:12:20 -08:00
|
|
|
fs_reg indirect = retype(get_nir_src(instr->src[0]),
|
|
|
|
|
BRW_REGISTER_TYPE_UD);
|
2015-05-19 16:57:43 -07:00
|
|
|
|
2015-11-24 15:12:20 -08:00
|
|
|
/* We need to pass a size to the MOV_INDIRECT but we don't want it to
|
|
|
|
|
* go past the end of the uniform. In order to keep the n'th
|
|
|
|
|
* component from running past, we subtract off the size of all but
|
|
|
|
|
* one component of the vector.
|
|
|
|
|
*/
|
2016-04-19 09:49:40 +02:00
|
|
|
assert(instr->const_index[1] >=
|
|
|
|
|
instr->num_components * (int) type_sz(dest.type));
|
2015-11-24 15:12:20 -08:00
|
|
|
unsigned read_size = instr->const_index[1] -
|
2016-04-19 09:49:40 +02:00
|
|
|
(instr->num_components - 1) * type_sz(dest.type);
|
2015-11-24 15:12:20 -08:00
|
|
|
|
2017-02-13 13:24:18 +01:00
|
|
|
bool supports_64bit_indirects =
|
2017-06-20 11:06:24 +01:00
|
|
|
!devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo);
|
2016-06-13 08:29:53 +02:00
|
|
|
|
2017-02-13 13:24:18 +01:00
|
|
|
if (type_sz(dest.type) != 8 || supports_64bit_indirects) {
|
|
|
|
|
for (unsigned j = 0; j < instr->num_components; j++) {
|
2016-06-13 08:29:53 +02:00
|
|
|
bld.emit(SHADER_OPCODE_MOV_INDIRECT,
|
|
|
|
|
offset(dest, bld, j), offset(src, bld, j),
|
|
|
|
|
indirect, brw_imm_ud(read_size));
|
2017-02-13 13:24:18 +01:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
const unsigned num_mov_indirects =
|
|
|
|
|
type_sz(dest.type) / type_sz(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
/* We read a little bit less per MOV INDIRECT, as they are now
|
|
|
|
|
* 32-bits ones instead of 64-bit. Fix read_size then.
|
|
|
|
|
*/
|
|
|
|
|
const unsigned read_size_32bit = read_size -
|
|
|
|
|
(num_mov_indirects - 1) * type_sz(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
for (unsigned j = 0; j < instr->num_components; j++) {
|
|
|
|
|
for (unsigned i = 0; i < num_mov_indirects; i++) {
|
|
|
|
|
bld.emit(SHADER_OPCODE_MOV_INDIRECT,
|
|
|
|
|
subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, i),
|
|
|
|
|
subscript(offset(src, bld, j), BRW_REGISTER_TYPE_UD, i),
|
|
|
|
|
indirect, brw_imm_ud(read_size_32bit));
|
|
|
|
|
}
|
2016-06-13 08:29:53 +02:00
|
|
|
}
|
2015-11-24 15:12:20 -08:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-03 17:03:19 -08:00
|
|
|
case nir_intrinsic_load_ubo: {
|
2014-12-08 17:34:52 -08:00
|
|
|
fs_reg surf_index;
|
2018-10-20 09:55:28 -05:00
|
|
|
if (nir_src_is_const(instr->src[0])) {
|
2015-10-30 08:39:11 +01:00
|
|
|
const unsigned index = stage_prog_data->binding_table.ubo_start +
|
2018-10-20 09:55:28 -05:00
|
|
|
nir_src_as_uint(instr->src[0]);
|
2015-11-02 11:26:16 -08:00
|
|
|
surf_index = brw_imm_ud(index);
|
2014-12-08 17:34:52 -08:00
|
|
|
} else {
|
|
|
|
|
/* The block index is not a constant. Evaluate the index expression
|
2015-02-19 14:48:29 +02:00
|
|
|
* per-channel and add the base UBO index; we have to select a value
|
|
|
|
|
* from any live channel.
|
2014-12-08 17:34:52 -08:00
|
|
|
*/
|
2014-05-16 02:21:51 -07:00
|
|
|
surf_index = vgrf(glsl_type::uint_type);
|
2015-06-03 21:01:32 +03:00
|
|
|
bld.ADD(surf_index, get_nir_src(instr->src[0]),
|
2015-11-02 11:26:16 -08:00
|
|
|
brw_imm_ud(stage_prog_data->binding_table.ubo_start));
|
2015-07-13 15:52:28 +03:00
|
|
|
surf_index = bld.emit_uniformize(surf_index);
|
2014-12-08 17:34:52 -08:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2018-10-20 09:55:28 -05:00
|
|
|
if (!nir_src_is_const(instr->src[1])) {
|
2015-10-31 16:52:29 -07:00
|
|
|
fs_reg base_offset = retype(get_nir_src(instr->src[1]),
|
2015-11-25 09:59:03 -08:00
|
|
|
BRW_REGISTER_TYPE_UD);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2014-12-12 22:38:41 -08:00
|
|
|
for (int i = 0; i < instr->num_components; i++)
|
2015-06-18 12:07:27 -07:00
|
|
|
VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
|
2016-01-13 10:17:10 +01:00
|
|
|
base_offset, i * type_sz(dest.type));
|
2019-09-09 22:21:17 -07:00
|
|
|
|
|
|
|
|
prog_data->has_ubo_pull = true;
|
2014-12-08 17:34:52 -08:00
|
|
|
} else {
|
2016-01-13 10:17:10 +01:00
|
|
|
/* Even if we are loading doubles, a pull constant load will load
|
|
|
|
|
* a 32-bit vec4, so should only reserve vgrf space for that. If we
|
|
|
|
|
* need to load a full dvec4 we will have to emit 2 loads. This is
|
|
|
|
|
* similar to demote_pull_constants(), except that in that case we
|
|
|
|
|
* see individual accesses to each component of the vector and then
|
|
|
|
|
* we let CSE deal with duplicate loads. Here we see a vector access
|
|
|
|
|
* and we have to split it if necessary.
|
|
|
|
|
*/
|
2016-05-19 12:50:01 +02:00
|
|
|
const unsigned type_size = type_sz(dest.type);
|
2018-10-20 09:55:28 -05:00
|
|
|
const unsigned load_offset = nir_src_as_uint(instr->src[1]);
|
2016-11-29 05:20:20 -08:00
|
|
|
|
|
|
|
|
/* See if we've selected this as a push constant candidate */
|
2018-10-20 09:55:28 -05:00
|
|
|
if (nir_src_is_const(instr->src[0])) {
|
|
|
|
|
const unsigned ubo_block = nir_src_as_uint(instr->src[0]);
|
|
|
|
|
const unsigned offset_256b = load_offset / 32;
|
2016-11-29 05:20:20 -08:00
|
|
|
|
|
|
|
|
fs_reg push_reg;
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
|
|
|
|
|
if (range->block == ubo_block &&
|
|
|
|
|
offset_256b >= range->start &&
|
|
|
|
|
offset_256b < range->start + range->length) {
|
|
|
|
|
|
|
|
|
|
push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
|
2018-10-20 09:55:28 -05:00
|
|
|
push_reg.offset = load_offset - 32 * range->start;
|
2016-11-29 05:20:20 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (push_reg.file != BAD_FILE) {
|
|
|
|
|
for (unsigned i = 0; i < instr->num_components; i++) {
|
|
|
|
|
bld.MOV(offset(dest, bld, i),
|
|
|
|
|
byte_offset(push_reg, i * type_size));
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-09 22:21:17 -07:00
|
|
|
prog_data->has_ubo_pull = true;
|
|
|
|
|
|
2016-12-08 19:18:00 -08:00
|
|
|
const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
|
|
|
|
|
const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
|
|
|
|
|
const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
|
2016-12-08 20:05:18 -08:00
|
|
|
|
2016-05-19 12:50:01 +02:00
|
|
|
for (unsigned c = 0; c < instr->num_components;) {
|
2018-10-20 09:55:28 -05:00
|
|
|
const unsigned base = load_offset + c * type_size;
|
2016-12-08 19:18:00 -08:00
|
|
|
/* Number of usable components in the next block-aligned load. */
|
2016-05-19 12:50:01 +02:00
|
|
|
const unsigned count = MIN2(instr->num_components - c,
|
2016-12-08 19:18:00 -08:00
|
|
|
(block_sz - base % block_sz) / type_size);
|
2016-01-13 10:17:10 +01:00
|
|
|
|
2016-12-08 20:05:18 -08:00
|
|
|
ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
|
2016-12-08 19:18:00 -08:00
|
|
|
packed_consts, surf_index,
|
|
|
|
|
brw_imm_ud(base & ~(block_sz - 1)));
|
2016-01-13 10:17:10 +01:00
|
|
|
|
2016-05-19 12:50:01 +02:00
|
|
|
const fs_reg consts =
|
2016-12-08 19:18:00 -08:00
|
|
|
retype(byte_offset(packed_consts, base & (block_sz - 1)),
|
|
|
|
|
dest.type);
|
2014-12-08 17:34:52 -08:00
|
|
|
|
2016-05-19 12:50:01 +02:00
|
|
|
for (unsigned d = 0; d < count; d++)
|
|
|
|
|
bld.MOV(offset(dest, bld, c + d), component(consts, d));
|
2014-12-08 17:34:52 -08:00
|
|
|
|
2016-05-19 12:50:01 +02:00
|
|
|
c += count;
|
2016-01-13 10:17:10 +01:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-14 17:13:57 -06:00
|
|
|
case nir_intrinsic_load_global: {
|
|
|
|
|
assert(devinfo->gen >= 8);
|
|
|
|
|
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) <= 32);
|
|
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_dest_bit_size(instr->dest) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) <= 4);
|
2018-11-14 17:13:57 -06:00
|
|
|
fs_inst *inst = bld.emit(SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
|
|
|
|
|
dest,
|
|
|
|
|
get_nir_src(instr->src[0]), /* Address */
|
|
|
|
|
fs_reg(), /* No source data */
|
|
|
|
|
brw_imm_ud(instr->num_components));
|
|
|
|
|
inst->size_written = instr->num_components *
|
|
|
|
|
inst->dst.component_size(inst->exec_size);
|
|
|
|
|
} else {
|
|
|
|
|
const unsigned bit_size = nir_dest_bit_size(instr->dest);
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) == 1);
|
|
|
|
|
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
|
|
|
|
|
tmp,
|
|
|
|
|
get_nir_src(instr->src[0]), /* Address */
|
|
|
|
|
fs_reg(), /* No source data */
|
|
|
|
|
brw_imm_ud(bit_size));
|
2019-07-13 18:35:20 -05:00
|
|
|
bld.MOV(dest, subscript(tmp, dest.type, 0));
|
2018-11-14 17:13:57 -06:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_store_global:
|
|
|
|
|
assert(devinfo->gen >= 8);
|
|
|
|
|
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) <= 32);
|
|
|
|
|
assert(nir_intrinsic_write_mask(instr) ==
|
|
|
|
|
(1u << instr->num_components) - 1);
|
|
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
|
|
|
|
assert(nir_src_num_components(instr->src[0]) <= 4);
|
2018-11-14 17:13:57 -06:00
|
|
|
bld.emit(SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(),
|
|
|
|
|
get_nir_src(instr->src[1]), /* Address */
|
|
|
|
|
get_nir_src(instr->src[0]), /* Data */
|
|
|
|
|
brw_imm_ud(instr->num_components));
|
|
|
|
|
} else {
|
|
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
2020-03-26 17:06:52 -05:00
|
|
|
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
|
2018-11-14 17:13:57 -06:00
|
|
|
brw_reg_type data_type =
|
|
|
|
|
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
|
|
|
|
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.MOV(tmp, retype(get_nir_src(instr->src[0]), data_type));
|
|
|
|
|
bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(),
|
|
|
|
|
get_nir_src(instr->src[1]), /* Address */
|
|
|
|
|
tmp, /* Data */
|
|
|
|
|
brw_imm_ud(nir_src_bit_size(instr->src[0])));
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
2018-11-26 15:15:04 -06:00
|
|
|
case nir_intrinsic_global_atomic_add:
|
|
|
|
|
case nir_intrinsic_global_atomic_imin:
|
|
|
|
|
case nir_intrinsic_global_atomic_umin:
|
|
|
|
|
case nir_intrinsic_global_atomic_imax:
|
|
|
|
|
case nir_intrinsic_global_atomic_umax:
|
|
|
|
|
case nir_intrinsic_global_atomic_and:
|
|
|
|
|
case nir_intrinsic_global_atomic_or:
|
|
|
|
|
case nir_intrinsic_global_atomic_xor:
|
|
|
|
|
case nir_intrinsic_global_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_global_atomic_comp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_global_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2018-11-26 15:15:04 -06:00
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_global_atomic_fmin:
|
|
|
|
|
case nir_intrinsic_global_atomic_fmax:
|
|
|
|
|
case nir_intrinsic_global_atomic_fcomp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_global_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2018-11-26 15:15:04 -06:00
|
|
|
break;
|
|
|
|
|
|
2015-06-01 09:28:29 +02:00
|
|
|
case nir_intrinsic_load_ssbo: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
const unsigned bit_size = nir_dest_bit_size(instr->dest);
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
get_nir_ssbo_intrinsic_index(bld, instr);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
2015-06-01 09:28:29 +02:00
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
/* Make dest unsigned because that's what the temporary will be */
|
|
|
|
|
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
2015-06-01 09:28:29 +02:00
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
/* Read the vector */
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_dest_bit_size(instr->dest) <= 32);
|
|
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_dest_bit_size(instr->dest) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) <= 4);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
fs_inst *inst =
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
inst->size_written = instr->num_components * dispatch_width * 4;
|
2018-11-12 18:48:10 -06:00
|
|
|
} else {
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) == 1);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
|
|
|
|
|
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
|
|
|
|
|
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2019-07-13 18:35:20 -05:00
|
|
|
bld.MOV(dest, subscript(read_result, dest.type, 0));
|
2018-11-12 18:48:10 -06:00
|
|
|
}
|
2015-06-01 09:28:29 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-01 09:26:01 +02:00
|
|
|
case nir_intrinsic_store_ssbo: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
2018-11-12 18:48:10 -06:00
|
|
|
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
get_nir_ssbo_intrinsic_index(bld, instr);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[2]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
2015-06-01 09:26:01 +02:00
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg data = get_nir_src(instr->src[0]);
|
|
|
|
|
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
2015-10-21 23:43:34 -07:00
|
|
|
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) <= 32);
|
2018-11-12 18:48:10 -06:00
|
|
|
assert(nir_intrinsic_write_mask(instr) ==
|
2018-11-16 09:23:56 -06:00
|
|
|
(1u << instr->num_components) - 1);
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
|
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
2018-11-12 18:48:10 -06:00
|
|
|
assert(nir_src_num_components(instr->src[0]) <= 4);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
|
|
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-11-12 18:48:10 -06:00
|
|
|
} else {
|
|
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2015-06-01 09:26:01 +02:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-03 17:03:19 -08:00
|
|
|
case nir_intrinsic_store_output: {
|
2019-07-19 17:38:04 -05:00
|
|
|
assert(nir_src_bit_size(instr->src[0]) == 32);
|
2014-08-15 10:32:07 -07:00
|
|
|
fs_reg src = get_nir_src(instr->src[0]);
|
2015-12-07 22:41:50 -08:00
|
|
|
|
2018-10-20 09:55:28 -05:00
|
|
|
unsigned store_offset = nir_src_as_uint(instr->src[1]);
|
2016-05-09 10:14:48 +02:00
|
|
|
unsigned num_components = instr->num_components;
|
2016-05-23 16:48:05 +10:00
|
|
|
unsigned first_component = nir_intrinsic_component(instr);
|
2016-05-09 10:14:48 +02:00
|
|
|
|
2017-08-27 21:48:03 -07:00
|
|
|
fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
|
2018-10-20 09:55:28 -05:00
|
|
|
4 * store_offset), src.type);
|
2016-05-09 10:14:48 +02:00
|
|
|
for (unsigned j = 0; j < num_components; j++) {
|
2016-05-23 16:48:05 +10:00
|
|
|
bld.MOV(offset(new_dest, bld, j + first_component),
|
|
|
|
|
offset(src, bld, j));
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-01 09:41:47 +02:00
|
|
|
case nir_intrinsic_ssbo_atomic_add:
|
2015-09-28 10:47:22 +02:00
|
|
|
case nir_intrinsic_ssbo_atomic_imin:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_umin:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_imax:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_umax:
|
2015-06-01 09:41:47 +02:00
|
|
|
case nir_intrinsic_ssbo_atomic_and:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_or:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_xor:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_comp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_ssbo_atomic(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2015-06-01 09:41:47 +02:00
|
|
|
break;
|
2018-04-18 14:02:33 -07:00
|
|
|
case nir_intrinsic_ssbo_atomic_fmin:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_fmax:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_fcomp_swap:
|
2019-08-20 23:10:50 -05:00
|
|
|
nir_emit_ssbo_atomic_float(bld, brw_aop_for_nir_intrinsic(instr), instr);
|
2018-04-18 14:02:33 -07:00
|
|
|
break;
|
2015-06-01 09:41:47 +02:00
|
|
|
|
2015-06-01 09:45:51 +02:00
|
|
|
case nir_intrinsic_get_buffer_size: {
|
2019-01-12 10:58:33 -06:00
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
2018-10-20 09:55:28 -05:00
|
|
|
unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
|
|
|
|
|
nir_src_as_uint(instr->src[0]) : 0;
|
2015-06-01 09:45:51 +02:00
|
|
|
|
2016-05-18 14:27:20 -07:00
|
|
|
/* A resinfo's sampler message is used to get the buffer size. The
|
|
|
|
|
* SIMD8's writeback message consists of four registers and SIMD16's
|
|
|
|
|
* writeback message consists of 8 destination registers (two per each
|
|
|
|
|
* component). Because we are only interested on the first channel of
|
|
|
|
|
* the first returned component, where resinfo returns the buffer size
|
|
|
|
|
* for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
|
|
|
|
|
* the dispatch width.
|
|
|
|
|
*/
|
|
|
|
|
const fs_builder ubld = bld.exec_all().group(8, 0);
|
|
|
|
|
fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
|
2015-06-01 09:45:51 +02:00
|
|
|
|
2016-05-18 14:27:20 -07:00
|
|
|
/* Set LOD = 0 */
|
|
|
|
|
ubld.MOV(src_payload, brw_imm_d(0));
|
2015-11-10 13:45:21 +01:00
|
|
|
|
2015-10-30 11:10:02 +01:00
|
|
|
const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
|
2017-12-10 17:03:32 -08:00
|
|
|
fs_inst *inst = ubld.emit(SHADER_OPCODE_GET_BUFFER_SIZE, ret_payload,
|
2016-05-18 14:27:20 -07:00
|
|
|
src_payload, brw_imm_ud(index));
|
2015-06-01 09:45:51 +02:00
|
|
|
inst->header_size = 0;
|
2016-05-18 14:27:20 -07:00
|
|
|
inst->mlen = 1;
|
2016-09-07 13:38:20 -07:00
|
|
|
inst->size_written = 4 * REG_SIZE;
|
2015-10-30 11:10:02 +01:00
|
|
|
|
2018-01-30 09:59:34 +01:00
|
|
|
/* SKL PRM, vol07, 3D Media GPGPU Engine, Bounds Checking and Faulting:
|
|
|
|
|
*
|
|
|
|
|
* "Out-of-bounds checking is always performed at a DWord granularity. If
|
|
|
|
|
* any part of the DWord is out-of-bounds then the whole DWord is
|
|
|
|
|
* considered out-of-bounds."
|
|
|
|
|
*
|
|
|
|
|
* This implies that types with size smaller than 4-bytes need to be
|
|
|
|
|
* padded if they don't complete the last dword of the buffer. But as we
|
|
|
|
|
* need to maintain the original size we need to reverse the padding
|
|
|
|
|
* calculation to return the correct size to know the number of elements
|
|
|
|
|
* of an unsized array. As we stored in the last two bits of the surface
|
|
|
|
|
* size the needed padding for the buffer, we calculate here the
|
|
|
|
|
* original buffer_size reversing the surface_size calculation:
|
|
|
|
|
*
|
|
|
|
|
* surface_size = isl_align(buffer_size, 4) +
|
|
|
|
|
* (isl_align(buffer_size) - buffer_size)
|
|
|
|
|
*
|
|
|
|
|
* buffer_size = surface_size & ~3 - surface_size & 3
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
fs_reg size_aligned4 = ubld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
fs_reg size_padding = ubld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
fs_reg buffer_size = ubld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
|
|
|
|
|
ubld.AND(size_padding, ret_payload, brw_imm_ud(3));
|
|
|
|
|
ubld.AND(size_aligned4, ret_payload, brw_imm_ud(~3));
|
|
|
|
|
ubld.ADD(buffer_size, size_aligned4, negate(size_padding));
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0));
|
2015-06-01 09:45:51 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-28 08:15:30 -06:00
|
|
|
case nir_intrinsic_load_scratch: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
|
|
|
|
assert(nir_dest_num_components(instr->dest) == 1);
|
|
|
|
|
const unsigned bit_size = nir_dest_bit_size(instr->dest);
|
|
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
|
|
|
|
|
if (devinfo->gen >= 8) {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
brw_imm_ud(GEN8_BTI_STATELESS_NON_COHERENT);
|
|
|
|
|
} else {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(BRW_BTI_STATELESS);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
const fs_reg nir_addr = get_nir_src(instr->src[0]);
|
|
|
|
|
|
|
|
|
|
/* Make dest unsigned because that's what the temporary will be */
|
|
|
|
|
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
|
|
|
|
|
|
|
|
|
/* Read the vector */
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_dest_num_components(instr->dest) == 1);
|
|
|
|
|
assert(nir_dest_bit_size(instr->dest) <= 32);
|
2020-08-19 18:53:32 -05:00
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
2020-03-26 17:06:52 -05:00
|
|
|
if (nir_dest_bit_size(instr->dest) >= 4 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
2019-02-28 08:15:30 -06:00
|
|
|
/* The offset for a DWORD scattered message is in dwords. */
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
|
|
|
|
|
swizzle_nir_scratch_addr(bld, nir_addr, true);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
} else {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
|
|
|
|
|
swizzle_nir_scratch_addr(bld, nir_addr, false);
|
|
|
|
|
|
|
|
|
|
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
|
|
|
|
|
read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
bld.MOV(dest, read_result);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_store_scratch: {
|
|
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
|
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
|
|
|
|
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
|
|
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
|
|
|
|
|
if (devinfo->gen >= 8) {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] =
|
|
|
|
|
brw_imm_ud(GEN8_BTI_STATELESS_NON_COHERENT);
|
|
|
|
|
} else {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(BRW_BTI_STATELESS);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
|
|
|
|
|
const fs_reg nir_addr = get_nir_src(instr->src[1]);
|
|
|
|
|
|
|
|
|
|
fs_reg data = get_nir_src(instr->src[0]);
|
|
|
|
|
data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
|
|
|
|
|
|
2020-03-26 17:06:52 -05:00
|
|
|
assert(nir_src_num_components(instr->src[0]) == 1);
|
|
|
|
|
assert(nir_src_bit_size(instr->src[0]) <= 32);
|
|
|
|
|
assert(nir_intrinsic_write_mask(instr) == 1);
|
2020-05-11 18:31:49 -05:00
|
|
|
assert(nir_intrinsic_align(instr) > 0);
|
2020-03-26 17:06:52 -05:00
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32 &&
|
|
|
|
|
nir_intrinsic_align(instr) >= 4) {
|
2019-02-28 08:15:30 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
|
|
|
|
|
|
|
|
|
/* The offset for a DWORD scattered message is in dwords. */
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
|
|
|
|
|
swizzle_nir_scratch_addr(bld, nir_addr, true);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
} else {
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
|
|
|
|
|
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
|
|
|
|
|
swizzle_nir_scratch_addr(bld, nir_addr, false);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
|
|
|
|
|
fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-22 15:28:24 -06:00
|
|
|
case nir_intrinsic_load_subgroup_size:
|
|
|
|
|
/* This should only happen for fragment shaders because every other case
|
|
|
|
|
* is lowered in NIR so we can optimize on it.
|
|
|
|
|
*/
|
|
|
|
|
assert(stage == MESA_SHADER_FRAGMENT);
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(dispatch_width));
|
|
|
|
|
break;
|
|
|
|
|
|
2017-08-31 21:56:43 -07:00
|
|
|
case nir_intrinsic_load_subgroup_invocation:
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
|
|
|
|
|
nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION]);
|
2016-05-22 16:33:44 -07:00
|
|
|
break;
|
|
|
|
|
|
2017-06-22 16:46:39 -07:00
|
|
|
case nir_intrinsic_load_subgroup_eq_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_ge_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_gt_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_le_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_lt_mask:
|
|
|
|
|
unreachable("not reached");
|
|
|
|
|
|
2017-06-20 22:39:22 -07:00
|
|
|
case nir_intrinsic_vote_any: {
|
2017-08-30 12:07:00 -07:00
|
|
|
const fs_builder ubld = bld.exec_all().group(1, 0);
|
2017-06-20 22:39:22 -07:00
|
|
|
|
|
|
|
|
/* The any/all predicates do not consider channel enables. To prevent
|
|
|
|
|
* dead channels from affecting the result, we initialize the flag with
|
|
|
|
|
* the identity value for the logical operation.
|
|
|
|
|
*/
|
2017-09-01 23:24:15 -07:00
|
|
|
if (dispatch_width == 32) {
|
|
|
|
|
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
|
|
|
|
|
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(0));
|
|
|
|
|
} else {
|
|
|
|
|
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
|
|
|
|
|
}
|
2017-06-20 22:39:22 -07:00
|
|
|
bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
|
2017-09-06 20:32:30 -07:00
|
|
|
|
2017-10-12 16:17:03 -07:00
|
|
|
/* For some reason, the any/all predicates don't work properly with
|
|
|
|
|
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
|
|
|
|
|
* doesn't read the correct subset of the flag register and you end up
|
|
|
|
|
* getting garbage in the second half. Work around this by using a pair
|
|
|
|
|
* of 1-wide MOVs and scattering the result.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
|
|
|
|
|
ubld.MOV(res1, brw_imm_d(0));
|
2017-09-01 23:24:15 -07:00
|
|
|
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ANY8H :
|
|
|
|
|
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
|
|
|
|
|
BRW_PREDICATE_ALIGN1_ANY32H,
|
2017-10-12 16:17:03 -07:00
|
|
|
ubld.MOV(res1, brw_imm_d(-1)));
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
|
2017-06-20 22:39:22 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case nir_intrinsic_vote_all: {
|
2017-08-30 12:07:00 -07:00
|
|
|
const fs_builder ubld = bld.exec_all().group(1, 0);
|
2017-06-20 22:39:22 -07:00
|
|
|
|
|
|
|
|
/* The any/all predicates do not consider channel enables. To prevent
|
|
|
|
|
* dead channels from affecting the result, we initialize the flag with
|
|
|
|
|
* the identity value for the logical operation.
|
|
|
|
|
*/
|
2017-09-01 23:24:15 -07:00
|
|
|
if (dispatch_width == 32) {
|
|
|
|
|
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
|
|
|
|
|
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(0xffffffff));
|
|
|
|
|
} else {
|
|
|
|
|
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
|
|
|
|
|
}
|
2017-06-20 22:39:22 -07:00
|
|
|
bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
|
2017-09-06 20:32:30 -07:00
|
|
|
|
2017-10-12 16:17:03 -07:00
|
|
|
/* For some reason, the any/all predicates don't work properly with
|
|
|
|
|
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
|
|
|
|
|
* doesn't read the correct subset of the flag register and you end up
|
|
|
|
|
* getting garbage in the second half. Work around this by using a pair
|
|
|
|
|
* of 1-wide MOVs and scattering the result.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
|
|
|
|
|
ubld.MOV(res1, brw_imm_d(0));
|
2017-09-01 23:24:15 -07:00
|
|
|
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
|
|
|
|
|
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
|
|
|
|
|
BRW_PREDICATE_ALIGN1_ALL32H,
|
2017-10-12 16:17:03 -07:00
|
|
|
ubld.MOV(res1, brw_imm_d(-1)));
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
|
2017-06-20 22:39:22 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2017-08-28 17:38:53 -07:00
|
|
|
case nir_intrinsic_vote_feq:
|
2017-08-28 17:33:33 -07:00
|
|
|
case nir_intrinsic_vote_ieq: {
|
2017-06-20 22:39:22 -07:00
|
|
|
fs_reg value = get_nir_src(instr->src[0]);
|
2017-08-28 17:38:53 -07:00
|
|
|
if (instr->intrinsic == nir_intrinsic_vote_feq) {
|
|
|
|
|
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
|
2019-01-04 10:15:39 +01:00
|
|
|
value.type = bit_size == 8 ? BRW_REGISTER_TYPE_B :
|
|
|
|
|
brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_F);
|
2017-08-28 17:38:53 -07:00
|
|
|
}
|
|
|
|
|
|
2017-06-20 22:39:22 -07:00
|
|
|
fs_reg uniformized = bld.emit_uniformize(value);
|
2017-08-30 12:07:00 -07:00
|
|
|
const fs_builder ubld = bld.exec_all().group(1, 0);
|
2017-06-20 22:39:22 -07:00
|
|
|
|
|
|
|
|
/* The any/all predicates do not consider channel enables. To prevent
|
|
|
|
|
* dead channels from affecting the result, we initialize the flag with
|
|
|
|
|
* the identity value for the logical operation.
|
|
|
|
|
*/
|
2017-09-01 23:24:15 -07:00
|
|
|
if (dispatch_width == 32) {
|
|
|
|
|
/* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
|
|
|
|
|
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(0xffffffff));
|
|
|
|
|
} else {
|
|
|
|
|
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
|
|
|
|
|
}
|
2017-06-20 22:39:22 -07:00
|
|
|
bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
|
2017-09-06 20:32:30 -07:00
|
|
|
|
2017-10-12 16:17:03 -07:00
|
|
|
/* For some reason, the any/all predicates don't work properly with
|
|
|
|
|
* SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
|
|
|
|
|
* doesn't read the correct subset of the flag register and you end up
|
|
|
|
|
* getting garbage in the second half. Work around this by using a pair
|
|
|
|
|
* of 1-wide MOVs and scattering the result.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
|
|
|
|
|
ubld.MOV(res1, brw_imm_d(0));
|
2017-09-01 23:24:15 -07:00
|
|
|
set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
|
|
|
|
|
dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
|
|
|
|
|
BRW_PREDICATE_ALIGN1_ALL32H,
|
2017-10-12 16:17:03 -07:00
|
|
|
ubld.MOV(res1, brw_imm_d(-1)));
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
|
2017-06-20 22:39:22 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2017-06-22 16:46:39 -07:00
|
|
|
|
|
|
|
|
case nir_intrinsic_ballot: {
|
|
|
|
|
const fs_reg value = retype(get_nir_src(instr->src[0]),
|
|
|
|
|
BRW_REGISTER_TYPE_UD);
|
2017-09-06 18:37:34 -07:00
|
|
|
struct brw_reg flag = brw_flag_reg(0, 0);
|
|
|
|
|
/* FIXME: For SIMD32 programs, this causes us to stomp on f0.1 as well
|
|
|
|
|
* as f0.0. This is a problem for fragment programs as we currently use
|
|
|
|
|
* f0.1 for discards. Fortunately, we don't support SIMD32 fragment
|
|
|
|
|
* programs yet so this isn't a problem. When we do, something will
|
|
|
|
|
* have to change.
|
|
|
|
|
*/
|
|
|
|
|
if (dispatch_width == 32)
|
|
|
|
|
flag.type = BRW_REGISTER_TYPE_UD;
|
2017-06-22 16:46:39 -07:00
|
|
|
|
2017-08-30 12:07:00 -07:00
|
|
|
bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
|
2017-06-22 16:46:39 -07:00
|
|
|
bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
|
|
|
|
|
|
2017-06-30 15:11:15 -07:00
|
|
|
if (instr->dest.ssa.bit_size > 32) {
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_UQ;
|
|
|
|
|
} else {
|
|
|
|
|
dest.type = BRW_REGISTER_TYPE_UD;
|
|
|
|
|
}
|
2017-06-22 16:46:39 -07:00
|
|
|
bld.MOV(dest, flag);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_read_invocation: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
const fs_reg invocation = get_nir_src(instr->src[1]);
|
|
|
|
|
fs_reg tmp = bld.vgrf(value.type);
|
|
|
|
|
|
|
|
|
|
bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
|
2017-09-01 22:35:43 -07:00
|
|
|
bld.emit_uniformize(invocation));
|
2017-06-22 16:46:39 -07:00
|
|
|
|
2017-09-01 22:37:42 -07:00
|
|
|
bld.MOV(retype(dest, value.type), fs_reg(component(tmp, 0)));
|
2017-06-22 16:46:39 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_read_first_invocation: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
2017-09-01 22:37:42 -07:00
|
|
|
bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
|
2017-06-22 16:46:39 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-29 09:21:32 -07:00
|
|
|
case nir_intrinsic_shuffle: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
const fs_reg index = get_nir_src(instr->src[1]);
|
|
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, index);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-21 22:17:37 -07:00
|
|
|
case nir_intrinsic_first_invocation: {
|
|
|
|
|
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
|
|
|
|
|
bld.exec_all().emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, tmp);
|
|
|
|
|
bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
|
|
|
|
|
fs_reg(component(tmp, 0)));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-01 15:18:02 -07:00
|
|
|
case nir_intrinsic_quad_broadcast: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
2018-10-20 09:55:28 -05:00
|
|
|
const unsigned index = nir_src_as_uint(instr->src[1]);
|
2017-09-01 15:18:02 -07:00
|
|
|
|
|
|
|
|
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
|
2018-10-20 09:55:28 -05:00
|
|
|
value, brw_imm_ud(index), brw_imm_ud(4));
|
2017-09-01 15:18:02 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_quad_swap_horizontal: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
const fs_reg tmp = bld.vgrf(value.type);
|
2019-07-26 16:03:08 -05:00
|
|
|
if (devinfo->gen <= 7) {
|
|
|
|
|
/* The hardware doesn't seem to support these crazy regions with
|
|
|
|
|
* compressed instructions on gen7 and earlier so we fall back to
|
|
|
|
|
* using quad swizzles. Fortunately, we don't support 64-bit
|
|
|
|
|
* anything in Vulkan on gen7.
|
|
|
|
|
*/
|
|
|
|
|
assert(nir_src_bit_size(instr->src[0]) == 32);
|
|
|
|
|
const fs_builder ubld = bld.exec_all();
|
|
|
|
|
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
|
|
|
|
|
brw_imm_ud(BRW_SWIZZLE4(1,0,3,2)));
|
|
|
|
|
bld.MOV(retype(dest, value.type), tmp);
|
|
|
|
|
} else {
|
|
|
|
|
const fs_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
|
2017-09-01 15:18:02 -07:00
|
|
|
|
2019-07-26 16:03:08 -05:00
|
|
|
const fs_reg src_left = horiz_stride(value, 2);
|
|
|
|
|
const fs_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
|
|
|
|
|
const fs_reg tmp_left = horiz_stride(tmp, 2);
|
|
|
|
|
const fs_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2);
|
2017-09-01 15:18:02 -07:00
|
|
|
|
2019-07-26 16:03:08 -05:00
|
|
|
ubld.MOV(tmp_left, src_right);
|
|
|
|
|
ubld.MOV(tmp_right, src_left);
|
2018-12-07 15:40:43 -08:00
|
|
|
|
2019-07-26 16:03:08 -05:00
|
|
|
}
|
2017-09-01 15:18:02 -07:00
|
|
|
bld.MOV(retype(dest, value.type), tmp);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_quad_swap_vertical: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32) {
|
|
|
|
|
/* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
|
|
|
|
|
const fs_reg tmp = bld.vgrf(value.type);
|
|
|
|
|
const fs_builder ubld = bld.exec_all();
|
|
|
|
|
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
|
|
|
|
|
brw_imm_ud(BRW_SWIZZLE4(2,3,0,1)));
|
|
|
|
|
bld.MOV(retype(dest, value.type), tmp);
|
|
|
|
|
} else {
|
|
|
|
|
/* For larger data types, we have to either emit dispatch_width many
|
|
|
|
|
* MOVs or else fall back to doing indirects.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
|
|
|
|
|
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
|
|
|
|
|
brw_imm_w(0x2));
|
|
|
|
|
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_quad_swap_diagonal: {
|
|
|
|
|
const fs_reg value = get_nir_src(instr->src[0]);
|
|
|
|
|
if (nir_src_bit_size(instr->src[0]) == 32) {
|
|
|
|
|
/* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
|
|
|
|
|
const fs_reg tmp = bld.vgrf(value.type);
|
|
|
|
|
const fs_builder ubld = bld.exec_all();
|
|
|
|
|
ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
|
|
|
|
|
brw_imm_ud(BRW_SWIZZLE4(3,2,1,0)));
|
|
|
|
|
bld.MOV(retype(dest, value.type), tmp);
|
|
|
|
|
} else {
|
|
|
|
|
/* For larger data types, we have to either emit dispatch_width many
|
|
|
|
|
* MOVs or else fall back to doing indirects.
|
|
|
|
|
*/
|
|
|
|
|
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
|
|
|
|
|
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
|
|
|
|
|
brw_imm_w(0x3));
|
|
|
|
|
bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-31 22:12:48 -07:00
|
|
|
case nir_intrinsic_reduce: {
|
|
|
|
|
fs_reg src = get_nir_src(instr->src[0]);
|
|
|
|
|
nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
|
|
|
|
|
unsigned cluster_size = nir_intrinsic_cluster_size(instr);
|
|
|
|
|
if (cluster_size == 0 || cluster_size > dispatch_width)
|
|
|
|
|
cluster_size = dispatch_width;
|
|
|
|
|
|
|
|
|
|
/* Figure out the source type */
|
|
|
|
|
src.type = brw_type_for_nir_type(devinfo,
|
|
|
|
|
(nir_alu_type)(nir_op_infos[redop].input_types[0] |
|
|
|
|
|
nir_src_bit_size(instr->src[0])));
|
|
|
|
|
|
|
|
|
|
fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
|
|
|
|
|
opcode brw_op = brw_op_for_nir_reduction_op(redop);
|
|
|
|
|
brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
|
|
|
|
|
|
2019-06-04 11:39:25 -05:00
|
|
|
/* There are a couple of register region issues that make things
|
|
|
|
|
* complicated for 8-bit types:
|
|
|
|
|
*
|
|
|
|
|
* 1. Only raw moves are allowed to write to a packed 8-bit
|
|
|
|
|
* destination.
|
|
|
|
|
* 2. If we use a strided destination, the efficient way to do scan
|
|
|
|
|
* operations ends up using strides that are too big to encode in
|
|
|
|
|
* an instruction.
|
|
|
|
|
*
|
|
|
|
|
* To get around these issues, we just do all 8-bit scan operations in
|
|
|
|
|
* 16 bits. It's actually fewer instructions than what we'd have to do
|
|
|
|
|
* if we were trying to do it in native 8-bit types and the results are
|
|
|
|
|
* the same once we truncate to 8 bits at the end.
|
|
|
|
|
*/
|
|
|
|
|
brw_reg_type scan_type = src.type;
|
|
|
|
|
if (type_sz(scan_type) == 1)
|
|
|
|
|
scan_type = brw_reg_type_from_bit_size(16, src.type);
|
|
|
|
|
|
2017-08-31 22:12:48 -07:00
|
|
|
/* Set up a register for all of our scratching around and initialize it
|
|
|
|
|
* to reduction operation's identity value.
|
|
|
|
|
*/
|
2019-06-04 11:39:25 -05:00
|
|
|
fs_reg scan = bld.vgrf(scan_type);
|
2017-08-31 22:12:48 -07:00
|
|
|
bld.exec_all().emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
|
|
|
|
|
|
|
|
|
|
bld.emit_scan(brw_op, scan, cluster_size, cond_mod);
|
|
|
|
|
|
|
|
|
|
dest.type = src.type;
|
|
|
|
|
if (cluster_size * type_sz(src.type) >= REG_SIZE * 2) {
|
|
|
|
|
/* In this case, CLUSTER_BROADCAST instruction isn't needed because
|
|
|
|
|
* the distance between clusters is at least 2 GRFs. In this case,
|
|
|
|
|
* we don't need the weird striding of the CLUSTER_BROADCAST
|
|
|
|
|
* instruction and can just do regular MOVs.
|
|
|
|
|
*/
|
|
|
|
|
assert((cluster_size * type_sz(src.type)) % (REG_SIZE * 2) == 0);
|
|
|
|
|
const unsigned groups =
|
|
|
|
|
(dispatch_width * type_sz(src.type)) / (REG_SIZE * 2);
|
|
|
|
|
const unsigned group_size = dispatch_width / groups;
|
|
|
|
|
for (unsigned i = 0; i < groups; i++) {
|
|
|
|
|
const unsigned cluster = (i * group_size) / cluster_size;
|
|
|
|
|
const unsigned comp = cluster * cluster_size + (cluster_size - 1);
|
|
|
|
|
bld.group(group_size, i).MOV(horiz_offset(dest, i * group_size),
|
|
|
|
|
component(scan, comp));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, dest, scan,
|
|
|
|
|
brw_imm_ud(cluster_size - 1), brw_imm_ud(cluster_size));
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_inclusive_scan:
|
|
|
|
|
case nir_intrinsic_exclusive_scan: {
|
|
|
|
|
fs_reg src = get_nir_src(instr->src[0]);
|
|
|
|
|
nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
|
|
|
|
|
|
|
|
|
|
/* Figure out the source type */
|
|
|
|
|
src.type = brw_type_for_nir_type(devinfo,
|
|
|
|
|
(nir_alu_type)(nir_op_infos[redop].input_types[0] |
|
|
|
|
|
nir_src_bit_size(instr->src[0])));
|
|
|
|
|
|
|
|
|
|
fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
|
|
|
|
|
opcode brw_op = brw_op_for_nir_reduction_op(redop);
|
|
|
|
|
brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
|
|
|
|
|
|
2019-06-04 11:39:25 -05:00
|
|
|
/* There are a couple of register region issues that make things
|
|
|
|
|
* complicated for 8-bit types:
|
|
|
|
|
*
|
|
|
|
|
* 1. Only raw moves are allowed to write to a packed 8-bit
|
|
|
|
|
* destination.
|
|
|
|
|
* 2. If we use a strided destination, the efficient way to do scan
|
|
|
|
|
* operations ends up using strides that are too big to encode in
|
|
|
|
|
* an instruction.
|
|
|
|
|
*
|
|
|
|
|
* To get around these issues, we just do all 8-bit scan operations in
|
|
|
|
|
* 16 bits. It's actually fewer instructions than what we'd have to do
|
|
|
|
|
* if we were trying to do it in native 8-bit types and the results are
|
|
|
|
|
* the same once we truncate to 8 bits at the end.
|
|
|
|
|
*/
|
|
|
|
|
brw_reg_type scan_type = src.type;
|
|
|
|
|
if (type_sz(scan_type) == 1)
|
|
|
|
|
scan_type = brw_reg_type_from_bit_size(16, src.type);
|
|
|
|
|
|
2017-08-31 22:12:48 -07:00
|
|
|
/* Set up a register for all of our scratching around and initialize it
|
|
|
|
|
* to reduction operation's identity value.
|
|
|
|
|
*/
|
2019-06-04 11:39:25 -05:00
|
|
|
fs_reg scan = bld.vgrf(scan_type);
|
2017-08-31 22:12:48 -07:00
|
|
|
const fs_builder allbld = bld.exec_all();
|
|
|
|
|
allbld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
|
|
|
|
|
|
|
|
|
|
if (instr->intrinsic == nir_intrinsic_exclusive_scan) {
|
|
|
|
|
/* Exclusive scan is a bit harder because we have to do an annoying
|
|
|
|
|
* shift of the contents before we can begin. To make things worse,
|
|
|
|
|
* we can't do this with a normal stride; we have to use indirects.
|
|
|
|
|
*/
|
2019-06-04 11:39:25 -05:00
|
|
|
fs_reg shifted = bld.vgrf(scan_type);
|
2017-08-31 22:12:48 -07:00
|
|
|
fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
|
|
|
|
|
allbld.ADD(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
|
|
|
|
|
brw_imm_w(-1));
|
|
|
|
|
allbld.emit(SHADER_OPCODE_SHUFFLE, shifted, scan, idx);
|
|
|
|
|
allbld.group(1, 0).MOV(component(shifted, 0), identity);
|
|
|
|
|
scan = shifted;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bld.emit_scan(brw_op, scan, dispatch_width, cond_mod);
|
|
|
|
|
|
|
|
|
|
bld.MOV(retype(dest, src.type), scan);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
default:
|
|
|
|
|
unreachable("unknown intrinsic");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-01 09:41:47 +02:00
|
|
|
/**
 * Emit a 32-bit untyped (BTI) atomic operation on an SSBO.
 *
 * \param bld    builder positioned at the point of emission
 * \param op     BRW_AOP_* atomic opcode to perform
 * \param instr  the NIR atomic intrinsic being lowered
 *
 * src[0] of the intrinsic selects the SSBO, src[1] is the byte address,
 * src[2] (when present) is the data operand and src[3] is the comparison
 * value for compare-and-write.
 */
void
fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
                                 int op, nir_intrinsic_instr *instr)
{
   /* The BTI untyped atomic messages only support 32-bit atomics. If you
    * just look at the big table of messages in the Vol 7 of the SKL PRM, they
    * appear to exist. However, if you look at Vol 2a, there are no message
    * descriptors provided for Qword atomic ops except for A64 messages.
    */
   assert(nir_dest_bit_size(instr->dest) == 32);

   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   /* Assemble the sources of the logical surface-atomic opcode. */
   fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
   srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
   srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
   srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
   srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);

   /* INC/DEC/PREDEC take no data operand; everything else reads src[2]. */
   fs_reg data;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data = get_nir_src(instr->src[2]);

   if (op == BRW_AOP_CMPWR) {
      /* Compare-and-write needs both the compare value and the new value
       * packed into a single two-register payload.
       */
      fs_reg tmp = bld.vgrf(data.type, 2);
      fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
      data = tmp;
   }
   srcs[SURFACE_LOGICAL_SRC_DATA] = data;

   /* Emit the actual atomic operation */
   bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
            dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
|
|
|
|
|
|
2018-04-18 14:02:33 -07:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
|
|
|
|
|
int op, nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
|
|
|
|
|
|
|
|
|
|
fs_reg data = get_nir_src(instr->src[2]);
|
|
|
|
|
if (op == BRW_AOP_FCMPWR) {
|
|
|
|
|
fs_reg tmp = bld.vgrf(data.type, 2);
|
|
|
|
|
fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
|
|
|
|
|
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
|
|
|
|
|
data = tmp;
|
|
|
|
|
}
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
2018-04-18 14:02:33 -07:00
|
|
|
|
|
|
|
|
/* Emit the actual atomic operation */
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-04-18 14:02:33 -07:00
|
|
|
}
|
|
|
|
|
|
2015-10-10 13:01:03 -07:00
|
|
|
/**
 * Emit an atomic operation on shared (workgroup-local) memory.
 *
 * \param bld    builder positioned at the point of emission
 * \param op     BRW_AOP_* atomic opcode to perform
 * \param instr  the NIR atomic intrinsic being lowered
 *
 * src[0] of the intrinsic is the SLM offset, src[1] (when present) is the
 * data operand and src[2] is the comparison value for compare-and-write.
 */
void
fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
                                   int op, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   /* Shared memory is addressed through the fixed SLM binding-table entry. */
   fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
   srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
   srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
   srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);

   /* INC/DEC/PREDEC take no data operand; everything else reads src[1]. */
   fs_reg data;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data = get_nir_src(instr->src[1]);
   if (op == BRW_AOP_CMPWR) {
      /* Compare-and-write packs the compare value and the new value into a
       * single two-register payload.
       */
      fs_reg tmp = bld.vgrf(data.type, 2);
      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
      data = tmp;
   }
   srcs[SURFACE_LOGICAL_SRC_DATA] = data;

   /* Get the offset; fold the constant base (const_index[0]) into an
    * immediate when the NIR offset is constant, otherwise add it at runtime.
    */
   if (nir_src_is_const(instr->src[0])) {
      srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
         brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
   } else {
      srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
      bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
              retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(instr->const_index[0]));
   }

   /* Emit the actual atomic operation */
   bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
            dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
|
|
|
|
|
|
2018-04-18 14:02:33 -07:00
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
|
|
|
|
|
int op, nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
fs_reg dest;
|
|
|
|
|
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
|
|
|
|
|
|
|
|
|
|
fs_reg data = get_nir_src(instr->src[1]);
|
|
|
|
|
if (op == BRW_AOP_FCMPWR) {
|
|
|
|
|
fs_reg tmp = bld.vgrf(data.type, 2);
|
|
|
|
|
fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
|
|
|
|
|
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
|
|
|
|
|
data = tmp;
|
|
|
|
|
}
|
|
|
|
|
srcs[SURFACE_LOGICAL_SRC_DATA] = data;
|
2018-04-18 14:02:33 -07:00
|
|
|
|
|
|
|
|
/* Get the offset */
|
2018-10-20 09:55:28 -05:00
|
|
|
if (nir_src_is_const(instr->src[0])) {
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
|
|
|
|
|
brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
|
2018-04-18 14:02:33 -07:00
|
|
|
} else {
|
2019-02-11 16:11:35 -06:00
|
|
|
srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
|
|
|
|
|
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
|
|
|
|
|
brw_imm_ud(instr->const_index[0]));
|
2018-04-18 14:02:33 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Emit the actual atomic operation operation */
|
|
|
|
|
|
2019-02-11 16:11:35 -06:00
|
|
|
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
|
|
|
|
|
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
|
2018-04-18 14:02:33 -07:00
|
|
|
}
|
|
|
|
|
|
2018-11-26 15:15:04 -06:00
|
|
|
/**
 * Emit an A64 (global-address) atomic operation.
 *
 * \param bld    builder positioned at the point of emission
 * \param op     BRW_AOP_* atomic opcode to perform
 * \param instr  the NIR atomic intrinsic being lowered
 *
 * src[0] of the intrinsic is the 64-bit address, src[1] (when present) is
 * the data operand and src[2] is the comparison value for compare-and-write.
 * Unlike the BTI path, A64 messages support 64-bit integer atomics.
 */
void
fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
                                   int op, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   fs_reg addr = get_nir_src(instr->src[0]);

   /* INC/DEC/PREDEC take no data operand; everything else reads src[1]. */
   fs_reg data;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data = get_nir_src(instr->src[1]);

   if (op == BRW_AOP_CMPWR) {
      /* Compare-and-write packs the compare value and the new value into a
       * single two-register payload.
       */
      fs_reg tmp = bld.vgrf(data.type, 2);
      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
      data = tmp;
   }

   /* Pick the 64-bit or 32-bit A64 atomic opcode based on the result size. */
   if (nir_dest_bit_size(instr->dest) == 64) {
      bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
               dest, addr, data, brw_imm_ud(op));
   } else {
      assert(nir_dest_bit_size(instr->dest) == 32);
      bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
               dest, addr, data, brw_imm_ud(op));
   }
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
|
|
|
|
|
int op, nir_intrinsic_instr *instr)
|
|
|
|
|
{
|
|
|
|
|
assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
|
|
|
|
|
fs_reg dest = get_nir_dest(instr->dest);
|
|
|
|
|
|
|
|
|
|
fs_reg addr = get_nir_src(instr->src[0]);
|
|
|
|
|
|
|
|
|
|
assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
|
|
|
|
|
fs_reg data = get_nir_src(instr->src[1]);
|
|
|
|
|
|
|
|
|
|
if (op == BRW_AOP_FCMPWR) {
|
|
|
|
|
fs_reg tmp = bld.vgrf(data.type, 2);
|
|
|
|
|
fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
|
|
|
|
|
bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
|
|
|
|
|
data = tmp;
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-14 14:40:35 -05:00
|
|
|
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL,
|
2018-11-26 15:15:04 -06:00
|
|
|
dest, addr, data, brw_imm_ud(op));
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
void
|
2015-06-03 21:02:57 +03:00
|
|
|
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
|
2014-08-15 10:32:07 -07:00
|
|
|
{
|
2016-02-06 09:05:10 -08:00
|
|
|
unsigned texture = instr->texture_index;
|
2016-02-05 18:24:02 -08:00
|
|
|
unsigned sampler = instr->sampler_index;
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2016-05-03 10:41:38 -07:00
|
|
|
fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2015-06-10 09:50:47 -07:00
|
|
|
int lod_components = 0;
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2016-03-25 14:02:50 -07:00
|
|
|
/* The hardware requires a LOD for buffer textures */
|
|
|
|
|
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
|
2016-03-25 14:02:50 -07:00
|
|
|
|
2016-11-28 18:13:02 -08:00
|
|
|
uint32_t header_bits = 0;
|
2014-08-15 10:32:07 -07:00
|
|
|
for (unsigned i = 0; i < instr->num_srcs; i++) {
|
2015-01-09 20:01:13 -08:00
|
|
|
fs_reg src = get_nir_src(instr->src[i].src);
|
|
|
|
|
switch (instr->src[i].src_type) {
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_tex_src_bias:
|
2016-05-04 15:10:25 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] =
|
|
|
|
|
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2016-12-12 08:32:38 -05:00
|
|
|
case nir_tex_src_comparator:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_tex_src_coord:
|
2014-10-15 12:18:25 -07:00
|
|
|
switch (instr->op) {
|
|
|
|
|
case nir_texop_txf:
|
|
|
|
|
case nir_texop_txf_ms:
|
2016-05-03 12:34:51 -07:00
|
|
|
case nir_texop_txf_ms_mcs:
|
2015-11-17 17:57:08 -08:00
|
|
|
case nir_texop_samples_identical:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
|
2014-10-15 12:18:25 -07:00
|
|
|
break;
|
|
|
|
|
default:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
|
2014-10-15 12:18:25 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_tex_src_ddx:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
|
2014-08-15 10:32:07 -07:00
|
|
|
lod_components = nir_tex_instr_src_size(instr, i);
|
|
|
|
|
break;
|
|
|
|
|
case nir_tex_src_ddy:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_tex_src_lod:
|
2014-10-15 12:18:25 -07:00
|
|
|
switch (instr->op) {
|
|
|
|
|
case nir_texop_txs:
|
2016-05-04 15:10:25 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] =
|
|
|
|
|
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
|
2014-10-15 12:18:25 -07:00
|
|
|
break;
|
|
|
|
|
case nir_texop_txf:
|
2016-05-04 15:10:25 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] =
|
|
|
|
|
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
|
2014-10-15 12:18:25 -07:00
|
|
|
break;
|
|
|
|
|
default:
|
2016-05-04 15:10:25 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_LOD] =
|
|
|
|
|
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
|
2014-10-15 12:18:25 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2018-10-11 15:57:50 -05:00
|
|
|
case nir_tex_src_min_lod:
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_MIN_LOD] =
|
|
|
|
|
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
|
|
|
|
|
break;
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_tex_src_ms_index:
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2016-02-09 14:51:28 -08:00
|
|
|
|
|
|
|
|
case nir_tex_src_offset: {
|
2019-03-27 17:34:10 -05:00
|
|
|
uint32_t offset_bits = 0;
|
|
|
|
|
if (brw_texture_offset(instr, i, &offset_bits)) {
|
2016-11-27 21:05:34 -05:00
|
|
|
header_bits |= offset_bits;
|
2016-02-09 14:51:28 -08:00
|
|
|
} else {
|
2016-11-28 18:13:02 -08:00
|
|
|
srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
|
2016-05-03 10:41:38 -07:00
|
|
|
retype(src, BRW_REGISTER_TYPE_D);
|
2016-02-09 14:51:28 -08:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
2016-02-09 14:51:28 -08:00
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
case nir_tex_src_projector:
|
2015-04-01 11:38:53 -07:00
|
|
|
unreachable("should be lowered");
|
2014-12-05 16:43:56 -08:00
|
|
|
|
2016-02-06 09:05:10 -08:00
|
|
|
case nir_tex_src_texture_offset: {
|
2014-12-05 16:43:56 -08:00
|
|
|
/* Emit code to evaluate the actual indexing expression */
|
2016-05-03 10:41:38 -07:00
|
|
|
fs_reg tmp = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.ADD(tmp, src, brw_imm_ud(texture));
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
|
2014-12-05 16:43:56 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2016-02-05 18:24:02 -08:00
|
|
|
case nir_tex_src_sampler_offset: {
|
|
|
|
|
/* Emit code to evaluate the actual indexing expression */
|
2016-05-03 10:41:38 -07:00
|
|
|
fs_reg tmp = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.ADD(tmp, src, brw_imm_ud(sampler));
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
|
2016-02-05 18:24:02 -08:00
|
|
|
break;
|
|
|
|
|
}
|
2015-11-02 17:58:29 -08:00
|
|
|
|
2019-02-06 15:42:17 -06:00
|
|
|
case nir_tex_src_texture_handle:
|
|
|
|
|
assert(nir_tex_instr_src_index(instr, nir_tex_src_texture_offset) == -1);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE] = fs_reg();
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = bld.emit_uniformize(src);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_tex_src_sampler_handle:
|
|
|
|
|
assert(nir_tex_instr_src_index(instr, nir_tex_src_sampler_offset) == -1);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLER] = fs_reg();
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE] = bld.emit_uniformize(src);
|
|
|
|
|
break;
|
|
|
|
|
|
2016-05-03 12:34:51 -07:00
|
|
|
case nir_tex_src_ms_mcs:
|
|
|
|
|
assert(instr->op == nir_texop_txf_ms);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
|
|
|
|
|
break;
|
|
|
|
|
|
2016-05-01 21:20:02 -07:00
|
|
|
case nir_tex_src_plane: {
|
2018-10-20 09:55:28 -05:00
|
|
|
const uint32_t plane = nir_src_as_uint(instr->src[i].src);
|
2016-05-01 21:20:02 -07:00
|
|
|
const uint32_t texture_index =
|
|
|
|
|
instr->texture_index +
|
|
|
|
|
stage_prog_data->binding_table.plane_start[plane] -
|
|
|
|
|
stage_prog_data->binding_table.texture_start;
|
|
|
|
|
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-15 10:32:07 -07:00
|
|
|
default:
|
|
|
|
|
unreachable("unknown texture source");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-03 12:34:51 -07:00
|
|
|
if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
|
|
|
|
|
(instr->op == nir_texop_txf_ms ||
|
|
|
|
|
instr->op == nir_texop_samples_identical)) {
|
2015-04-15 18:00:05 -07:00
|
|
|
if (devinfo->gen >= 7 &&
|
2016-02-06 09:05:10 -08:00
|
|
|
key_tex->compressed_multisample_layout_mask & (1 << texture)) {
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_MCS] =
|
|
|
|
|
emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
|
|
|
|
|
instr->coord_components,
|
2019-02-06 15:42:17 -06:00
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE],
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE]);
|
2015-03-09 01:58:51 -07:00
|
|
|
} else {
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
|
2015-03-09 01:58:51 -07:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
|
2016-05-03 10:41:38 -07:00
|
|
|
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
|
|
|
|
|
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
|
|
|
|
|
|
|
|
|
|
enum opcode opcode;
|
2014-08-15 10:32:07 -07:00
|
|
|
switch (instr->op) {
|
2016-05-03 10:41:38 -07:00
|
|
|
case nir_texop_tex:
|
2019-04-18 21:04:57 -07:00
|
|
|
opcode = SHADER_OPCODE_TEX_LOGICAL;
|
2016-05-03 10:41:38 -07:00
|
|
|
break;
|
|
|
|
|
case nir_texop_txb:
|
|
|
|
|
opcode = FS_OPCODE_TXB_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_txl:
|
|
|
|
|
opcode = SHADER_OPCODE_TXL_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_txd:
|
|
|
|
|
opcode = SHADER_OPCODE_TXD_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_txf:
|
|
|
|
|
opcode = SHADER_OPCODE_TXF_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_txf_ms:
|
|
|
|
|
if ((key_tex->msaa_16 & (1 << sampler)))
|
|
|
|
|
opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
|
|
|
|
|
else
|
|
|
|
|
opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
|
|
|
|
|
break;
|
2016-05-03 12:34:51 -07:00
|
|
|
case nir_texop_txf_ms_mcs:
|
|
|
|
|
opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
|
|
|
|
|
break;
|
2016-05-03 10:41:38 -07:00
|
|
|
case nir_texop_query_levels:
|
|
|
|
|
case nir_texop_txs:
|
|
|
|
|
opcode = SHADER_OPCODE_TXS_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_lod:
|
|
|
|
|
opcode = SHADER_OPCODE_LOD_LOGICAL;
|
|
|
|
|
break;
|
|
|
|
|
case nir_texop_tg4:
|
2016-11-28 18:13:02 -08:00
|
|
|
if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE)
|
2016-05-03 10:41:38 -07:00
|
|
|
opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
|
|
|
|
|
else
|
|
|
|
|
opcode = SHADER_OPCODE_TG4_LOGICAL;
|
|
|
|
|
break;
|
2016-05-20 00:37:37 -07:00
|
|
|
case nir_texop_texture_samples:
|
|
|
|
|
opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
|
|
|
|
|
break;
|
2016-05-03 12:24:51 -07:00
|
|
|
case nir_texop_samples_identical: {
|
|
|
|
|
fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
|
|
|
|
|
|
|
|
|
|
/* If mcs is an immediate value, it means there is no MCS. In that case
|
|
|
|
|
* just return false.
|
|
|
|
|
*/
|
|
|
|
|
if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
|
|
|
|
|
bld.MOV(dst, brw_imm_ud(0u));
|
|
|
|
|
} else if ((key_tex->msaa_16 & (1 << sampler))) {
|
|
|
|
|
fs_reg tmp = vgrf(glsl_type::uint_type);
|
|
|
|
|
bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
|
|
|
|
|
offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
|
|
|
|
|
bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
|
|
|
|
|
} else {
|
|
|
|
|
bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
|
|
|
|
|
BRW_CONDITIONAL_EQ);
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
default:
|
|
|
|
|
unreachable("unknown texture opcode");
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-28 18:13:02 -08:00
|
|
|
if (instr->op == nir_texop_tg4) {
|
|
|
|
|
if (instr->component == 1 &&
|
|
|
|
|
key_tex->gather_channel_quirk_mask & (1 << texture)) {
|
|
|
|
|
/* gather4 sampler is broken for green channel on RG32F --
|
|
|
|
|
* we must ask for blue instead.
|
|
|
|
|
*/
|
|
|
|
|
header_bits |= 2 << 16;
|
|
|
|
|
} else {
|
|
|
|
|
header_bits |= instr->component << 16;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-01-20 19:03:21 -08:00
|
|
|
fs_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type), 4);
|
2016-05-03 10:41:38 -07:00
|
|
|
fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
|
2016-11-28 18:13:02 -08:00
|
|
|
inst->offset = header_bits;
|
i965/fs: Reduce the response length of sampler messages on Skylake.
Often, we don't need a full 4 channels worth of data from the sampler.
For example, depth comparisons and red textures only return one value.
To handle this, the sampler message header contains a mask which can
be used to disable channels, and reduce the message length (in SIMD16
mode on all hardware, and SIMD8 mode on Broadwell and later).
We've never used it before, since it required setting up a message
header. This meant trading a smaller response length for a larger
message length and additional MOVs to set it up.
However, Skylake introduces a terrific new feature: for headerless
messages, you can simply reduce the response length, and it makes
the implicit header contain an appropriate mask. So to read only
RG, you would simply set the message length to 2 or 4 (SIMD8/16).
This means we can finally take advantage of this at no cost.
total instructions in shared programs: 9091831 -> 9073067 (-0.21%)
instructions in affected programs: 191370 -> 172606 (-9.81%)
helped: 2609
HURT: 0
total cycles in shared programs: 70868114 -> 68454752 (-3.41%)
cycles in affected programs: 35841154 -> 33427792 (-6.73%)
helped: 16357
HURT: 8188
total spills in shared programs: 3492 -> 1707 (-51.12%)
spills in affected programs: 2749 -> 964 (-64.93%)
helped: 74
HURT: 0
total fills in shared programs: 4266 -> 2647 (-37.95%)
fills in affected programs: 3029 -> 1410 (-53.45%)
helped: 74
HURT: 0
LOST: 1
GAINED: 143
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2016-04-23 01:54:33 -07:00
|
|
|
|
2016-05-03 10:41:38 -07:00
|
|
|
const unsigned dest_size = nir_tex_instr_dest_size(instr);
|
|
|
|
|
if (devinfo->gen >= 9 &&
|
|
|
|
|
instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
|
|
|
|
|
unsigned write_mask = instr->dest.is_ssa ?
|
|
|
|
|
nir_ssa_def_components_read(&instr->dest.ssa):
|
|
|
|
|
(1 << dest_size) - 1;
|
i965/fs: Reduce the response length of sampler messages on Skylake.
Often, we don't need a full 4 channels worth of data from the sampler.
For example, depth comparisons and red textures only return one value.
To handle this, the sampler message header contains a mask which can
be used to disable channels, and reduce the message length (in SIMD16
mode on all hardware, and SIMD8 mode on Broadwell and later).
We've never used it before, since it required setting up a message
header. This meant trading a smaller response length for a larger
message length and additional MOVs to set it up.
However, Skylake introduces a terrific new feature: for headerless
messages, you can simply reduce the response length, and it makes
the implicit header contain an appropriate mask. So to read only
RG, you would simply set the message length to 2 or 4 (SIMD8/16).
This means we can finally take advantage of this at no cost.
total instructions in shared programs: 9091831 -> 9073067 (-0.21%)
instructions in affected programs: 191370 -> 172606 (-9.81%)
helped: 2609
HURT: 0
total cycles in shared programs: 70868114 -> 68454752 (-3.41%)
cycles in affected programs: 35841154 -> 33427792 (-6.73%)
helped: 16357
HURT: 8188
total spills in shared programs: 3492 -> 1707 (-51.12%)
spills in affected programs: 2749 -> 964 (-64.93%)
helped: 74
HURT: 0
total fills in shared programs: 4266 -> 2647 (-37.95%)
fills in affected programs: 3029 -> 1410 (-53.45%)
helped: 74
HURT: 0
LOST: 1
GAINED: 143
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2016-04-23 01:54:33 -07:00
|
|
|
assert(write_mask != 0); /* dead code should have been eliminated */
|
2016-09-01 18:43:48 -07:00
|
|
|
inst->size_written = util_last_bit(write_mask) *
|
|
|
|
|
inst->dst.component_size(inst->exec_size);
|
2016-05-03 10:41:38 -07:00
|
|
|
} else {
|
2016-09-01 18:43:48 -07:00
|
|
|
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
|
2016-05-03 10:41:38 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
|
|
|
|
|
inst->shadow_compare = true;
|
|
|
|
|
|
2016-11-28 18:13:02 -08:00
|
|
|
if (instr->op == nir_texop_tg4 && devinfo->gen == 6)
|
|
|
|
|
emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
|
2016-05-03 10:41:38 -07:00
|
|
|
|
2016-05-03 11:57:30 -07:00
|
|
|
fs_reg nir_dest[4];
|
|
|
|
|
for (unsigned i = 0; i < dest_size; i++)
|
|
|
|
|
nir_dest[i] = offset(dst, bld, i);
|
i965/fs: Reduce the response length of sampler messages on Skylake.
Often, we don't need a full 4 channels worth of data from the sampler.
For example, depth comparisons and red textures only return one value.
To handle this, the sampler message header contains a mask which can
be used to disable channels, and reduce the message length (in SIMD16
mode on all hardware, and SIMD8 mode on Broadwell and later).
We've never used it before, since it required setting up a message
header. This meant trading a smaller response length for a larger
message length and additional MOVs to set it up.
However, Skylake introduces a terrific new feature: for headerless
messages, you can simply reduce the response length, and it makes
the implicit header contain an appropriate mask. So to read only
RG, you would simply set the message length to 2 or 4 (SIMD8/16).
This means we can finally take advantage of this at no cost.
total instructions in shared programs: 9091831 -> 9073067 (-0.21%)
instructions in affected programs: 191370 -> 172606 (-9.81%)
helped: 2609
HURT: 0
total cycles in shared programs: 70868114 -> 68454752 (-3.41%)
cycles in affected programs: 35841154 -> 33427792 (-6.73%)
helped: 16357
HURT: 8188
total spills in shared programs: 3492 -> 1707 (-51.12%)
spills in affected programs: 2749 -> 964 (-64.93%)
helped: 74
HURT: 0
total fills in shared programs: 4266 -> 2647 (-37.95%)
fills in affected programs: 3029 -> 1410 (-53.45%)
helped: 74
HURT: 0
LOST: 1
GAINED: 143
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2016-04-23 01:54:33 -07:00
|
|
|
|
2016-05-03 11:57:30 -07:00
|
|
|
if (instr->op == nir_texop_query_levels) {
|
|
|
|
|
/* # levels is in .w */
|
|
|
|
|
nir_dest[0] = offset(dst, bld, 3);
|
2016-07-18 16:25:12 -07:00
|
|
|
} else if (instr->op == nir_texop_txs &&
|
|
|
|
|
dest_size >= 3 && devinfo->gen < 7) {
|
|
|
|
|
/* Gen4-6 return 0 instead of 1 for single layer surfaces. */
|
2016-05-03 10:41:38 -07:00
|
|
|
fs_reg depth = offset(dst, bld, 2);
|
2016-07-18 16:25:12 -07:00
|
|
|
nir_dest[2] = vgrf(glsl_type::int_type);
|
|
|
|
|
bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
|
2016-05-03 10:41:38 -07:00
|
|
|
}
|
2014-08-15 10:32:07 -07:00
|
|
|
|
2016-05-03 11:57:30 -07:00
|
|
|
bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
|
2014-08-15 10:32:07 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2015-06-03 20:57:12 +03:00
|
|
|
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
|
2014-08-15 10:32:07 -07:00
|
|
|
{
|
|
|
|
|
switch (instr->type) {
|
|
|
|
|
case nir_jump_break:
|
2015-06-03 20:57:12 +03:00
|
|
|
bld.emit(BRW_OPCODE_BREAK);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_jump_continue:
|
2015-06-03 20:57:12 +03:00
|
|
|
bld.emit(BRW_OPCODE_CONTINUE);
|
2014-08-15 10:32:07 -07:00
|
|
|
break;
|
|
|
|
|
case nir_jump_return:
|
|
|
|
|
default:
|
|
|
|
|
unreachable("unknown jump");
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-01-22 14:00:38 +01:00
|
|
|
|
2018-06-09 11:45:01 +02:00
|
|
|
/*
|
|
|
|
|
* This helper takes a source register and un/shuffles it into the destination
|
|
|
|
|
* register.
|
|
|
|
|
*
|
|
|
|
|
* If source type size is smaller than destination type size the operation
|
|
|
|
|
* needed is a component shuffle. The opposite case would be an unshuffle. If
|
|
|
|
|
* source/destination type size is equal a shuffle is done that would be
|
|
|
|
|
* equivalent to a simple MOV.
|
|
|
|
|
*
|
|
|
|
|
* For example, if source is a 16-bit type and destination is 32-bit. A 3
|
|
|
|
|
* components .xyz 16-bit vector on SIMD8 would be.
|
|
|
|
|
*
|
|
|
|
|
* |x1|x2|x3|x4|x5|x6|x7|x8|y1|y2|y3|y4|y5|y6|y7|y8|
|
|
|
|
|
* |z1|z2|z3|z4|z5|z6|z7|z8| | | | | | | | |
|
|
|
|
|
*
|
|
|
|
|
* This helper will return the following 2 32-bit components with the 16-bit
|
|
|
|
|
* values shuffled:
|
|
|
|
|
*
|
|
|
|
|
* |x1 y1|x2 y2|x3 y3|x4 y4|x5 y5|x6 y6|x7 y7|x8 y8|
|
|
|
|
|
* |z1 |z2 |z3 |z4 |z5 |z6 |z7 |z8 |
|
|
|
|
|
*
|
|
|
|
|
* For unshuffle, the example would be the opposite, a 64-bit type source
|
|
|
|
|
* and a 32-bit destination. A 2 component .xy 64-bit vector on SIMD8
|
|
|
|
|
* would be:
|
|
|
|
|
*
|
|
|
|
|
* | x1l x1h | x2l x2h | x3l x3h | x4l x4h |
|
|
|
|
|
* | x5l x5h | x6l x6h | x7l x7h | x8l x8h |
|
|
|
|
|
* | y1l y1h | y2l y2h | y3l y3h | y4l y4h |
|
|
|
|
|
* | y5l y5h | y6l y6h | y7l y7h | y8l y8h |
|
|
|
|
|
*
|
|
|
|
|
* The returned result would be the following 4 32-bit components unshuffled:
|
|
|
|
|
*
|
|
|
|
|
* | x1l | x2l | x3l | x4l | x5l | x6l | x7l | x8l |
|
|
|
|
|
* | x1h | x2h | x3h | x4h | x5h | x6h | x7h | x8h |
|
|
|
|
|
* | y1l | y2l | y3l | y4l | y5l | y6l | y7l | y8l |
|
|
|
|
|
* | y1h | y2h | y3h | y4h | y5h | y6h | y7h | y8h |
|
|
|
|
|
*
|
|
|
|
|
* - Source and destination register must not be overlapped.
|
|
|
|
|
* - components units are measured in terms of the smaller type between
|
|
|
|
|
* source and destination because we are un/shuffling the smaller
|
|
|
|
|
* components from/into the bigger ones.
|
|
|
|
|
* - first_component parameter allows skipping source components.
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
shuffle_src_to_dst(const fs_builder &bld,
|
|
|
|
|
const fs_reg &dst,
|
|
|
|
|
const fs_reg &src,
|
|
|
|
|
uint32_t first_component,
|
|
|
|
|
uint32_t components)
|
|
|
|
|
{
|
|
|
|
|
if (type_sz(src.type) == type_sz(dst.type)) {
|
|
|
|
|
assert(!regions_overlap(dst,
|
|
|
|
|
type_sz(dst.type) * bld.dispatch_width() * components,
|
|
|
|
|
offset(src, bld, first_component),
|
|
|
|
|
type_sz(src.type) * bld.dispatch_width() * components));
|
|
|
|
|
for (unsigned i = 0; i < components; i++) {
|
|
|
|
|
bld.MOV(retype(offset(dst, bld, i), src.type),
|
|
|
|
|
offset(src, bld, i + first_component));
|
|
|
|
|
}
|
|
|
|
|
} else if (type_sz(src.type) < type_sz(dst.type)) {
|
|
|
|
|
/* Source is shuffled into destination */
|
|
|
|
|
unsigned size_ratio = type_sz(dst.type) / type_sz(src.type);
|
|
|
|
|
assert(!regions_overlap(dst,
|
|
|
|
|
type_sz(dst.type) * bld.dispatch_width() *
|
|
|
|
|
DIV_ROUND_UP(components, size_ratio),
|
|
|
|
|
offset(src, bld, first_component),
|
|
|
|
|
type_sz(src.type) * bld.dispatch_width() * components));
|
|
|
|
|
|
|
|
|
|
brw_reg_type shuffle_type =
|
|
|
|
|
brw_reg_type_from_bit_size(8 * type_sz(src.type),
|
|
|
|
|
BRW_REGISTER_TYPE_D);
|
|
|
|
|
for (unsigned i = 0; i < components; i++) {
|
|
|
|
|
fs_reg shuffle_component_i =
|
|
|
|
|
subscript(offset(dst, bld, i / size_ratio),
|
|
|
|
|
shuffle_type, i % size_ratio);
|
|
|
|
|
bld.MOV(shuffle_component_i,
|
|
|
|
|
retype(offset(src, bld, i + first_component), shuffle_type));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* Source is unshuffled into destination */
|
|
|
|
|
unsigned size_ratio = type_sz(src.type) / type_sz(dst.type);
|
|
|
|
|
assert(!regions_overlap(dst,
|
|
|
|
|
type_sz(dst.type) * bld.dispatch_width() * components,
|
|
|
|
|
offset(src, bld, first_component / size_ratio),
|
|
|
|
|
type_sz(src.type) * bld.dispatch_width() *
|
|
|
|
|
DIV_ROUND_UP(components + (first_component % size_ratio),
|
|
|
|
|
size_ratio)));
|
|
|
|
|
|
|
|
|
|
brw_reg_type shuffle_type =
|
|
|
|
|
brw_reg_type_from_bit_size(8 * type_sz(dst.type),
|
|
|
|
|
BRW_REGISTER_TYPE_D);
|
|
|
|
|
for (unsigned i = 0; i < components; i++) {
|
|
|
|
|
fs_reg shuffle_component_i =
|
|
|
|
|
subscript(offset(src, bld, (first_component + i) / size_ratio),
|
|
|
|
|
shuffle_type, (first_component + i) % size_ratio);
|
|
|
|
|
bld.MOV(retype(offset(dst, bld, i), shuffle_type),
|
|
|
|
|
shuffle_component_i);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-09 11:45:22 +02:00
|
|
|
void
|
|
|
|
|
shuffle_from_32bit_read(const fs_builder &bld,
|
|
|
|
|
const fs_reg &dst,
|
|
|
|
|
const fs_reg &src,
|
|
|
|
|
uint32_t first_component,
|
|
|
|
|
uint32_t components)
|
|
|
|
|
{
|
|
|
|
|
assert(type_sz(src.type) == 4);
|
|
|
|
|
|
|
|
|
|
/* This function takes components in units of the destination type while
|
|
|
|
|
* shuffle_src_to_dst takes components in units of the smallest type
|
|
|
|
|
*/
|
|
|
|
|
if (type_sz(dst.type) > 4) {
|
|
|
|
|
assert(type_sz(dst.type) == 8);
|
|
|
|
|
first_component *= 2;
|
|
|
|
|
components *= 2;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
shuffle_src_to_dst(bld, dst, src, first_component, components);
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-09 14:12:43 +01:00
|
|
|
fs_reg
|
|
|
|
|
setup_imm_df(const fs_builder &bld, double v)
|
|
|
|
|
{
|
2016-08-22 15:01:08 -07:00
|
|
|
const struct gen_device_info *devinfo = bld.shader->devinfo;
|
2016-03-09 14:12:43 +01:00
|
|
|
assert(devinfo->gen >= 7);
|
|
|
|
|
|
|
|
|
|
if (devinfo->gen >= 8)
|
|
|
|
|
return brw_imm_df(v);
|
|
|
|
|
|
2016-07-07 09:19:43 +02:00
|
|
|
/* gen7.5 does not support DF immediates straighforward but the DIM
|
|
|
|
|
* instruction allows to set the 64-bit immediate value.
|
|
|
|
|
*/
|
|
|
|
|
if (devinfo->is_haswell) {
|
2016-12-23 07:37:38 +01:00
|
|
|
const fs_builder ubld = bld.exec_all().group(1, 0);
|
2016-07-07 09:19:43 +02:00
|
|
|
fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_DF, 1);
|
|
|
|
|
ubld.DIM(dst, brw_imm_df(v));
|
|
|
|
|
return component(dst, 0);
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-09 14:12:43 +01:00
|
|
|
/* gen7 does not support DF immediates, so we generate a 64-bit constant by
|
|
|
|
|
* writing the low 32-bit of the constant to suboffset 0 of a VGRF and
|
|
|
|
|
* the high 32-bit to suboffset 4 and then applying a stride of 0.
|
|
|
|
|
*
|
|
|
|
|
* Alternatively, we could also produce a normal VGRF (without stride 0)
|
|
|
|
|
* by writing to all the channels in the VGRF, however, that would hit the
|
|
|
|
|
* gen7 bug where we have to split writes that span more than 1 register
|
|
|
|
|
* into instructions with a width of 4 (otherwise the write to the second
|
|
|
|
|
* register written runs into an execmask hardware bug) which isn't very
|
|
|
|
|
* nice.
|
|
|
|
|
*/
|
|
|
|
|
union {
|
|
|
|
|
double d;
|
|
|
|
|
struct {
|
|
|
|
|
uint32_t i1;
|
|
|
|
|
uint32_t i2;
|
|
|
|
|
};
|
|
|
|
|
} di;
|
|
|
|
|
|
|
|
|
|
di.d = v;
|
|
|
|
|
|
|
|
|
|
const fs_builder ubld = bld.exec_all().group(1, 0);
|
|
|
|
|
const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
|
|
|
|
|
ubld.MOV(tmp, brw_imm_ud(di.i1));
|
|
|
|
|
ubld.MOV(horiz_offset(tmp, 1), brw_imm_ud(di.i2));
|
|
|
|
|
|
|
|
|
|
return component(retype(tmp, BRW_REGISTER_TYPE_DF), 0);
|
|
|
|
|
}
|
2018-07-27 13:38:38 +02:00
|
|
|
|
|
|
|
|
fs_reg
|
|
|
|
|
setup_imm_b(const fs_builder &bld, int8_t v)
|
|
|
|
|
{
|
|
|
|
|
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_B);
|
|
|
|
|
bld.MOV(tmp, brw_imm_w(v));
|
|
|
|
|
return tmp;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fs_reg
|
|
|
|
|
setup_imm_ub(const fs_builder &bld, uint8_t v)
|
|
|
|
|
{
|
|
|
|
|
const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UB);
|
|
|
|
|
bld.MOV(tmp, brw_imm_uw(v));
|
|
|
|
|
return tmp;
|
|
|
|
|
}
|