2014-07-10 18:18:17 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*
|
|
|
|
|
* Authors:
|
|
|
|
|
* Connor Abbott (cwabbott0@gmail.com)
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
2019-03-03 10:00:14 -06:00
|
|
|
#include "float64_glsl.h"
|
2016-05-09 14:35:58 -07:00
|
|
|
#include "glsl_to_nir.h"
|
2016-05-25 16:00:38 -07:00
|
|
|
#include "ir_visitor.h"
|
|
|
|
|
#include "ir_hierarchical_visitor.h"
|
|
|
|
|
#include "ir.h"
|
2019-02-20 17:13:49 +11:00
|
|
|
#include "ir_optimization.h"
|
2019-03-03 10:00:14 -06:00
|
|
|
#include "program.h"
|
2016-05-25 16:00:38 -07:00
|
|
|
#include "compiler/nir/nir_control_flow.h"
|
|
|
|
|
#include "compiler/nir/nir_builder.h"
|
2019-10-11 16:02:25 +02:00
|
|
|
#include "compiler/nir/nir_builtin_builder.h"
|
2019-06-04 11:41:25 +02:00
|
|
|
#include "compiler/nir/nir_deref.h"
|
2019-03-03 10:00:14 -06:00
|
|
|
#include "main/errors.h"
|
2018-04-08 13:13:08 -04:00
|
|
|
#include "main/mtypes.h"
|
2019-03-03 10:00:14 -06:00
|
|
|
#include "main/shaderobj.h"
|
2018-11-13 09:45:03 -06:00
|
|
|
#include "util/u_math.h"
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* pass to lower GLSL IR to NIR
|
|
|
|
|
*
|
|
|
|
|
* This will lower variable dereferences to loads/stores of corresponding
|
|
|
|
|
* variables in NIR - the variables will be converted to registers in a later
|
|
|
|
|
* pass.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
|
|
class nir_visitor : public ir_visitor
|
|
|
|
|
{
|
|
|
|
|
public:
|
2022-01-07 12:47:08 +10:00
|
|
|
nir_visitor(const struct gl_constants *consts, nir_shader *shader);
|
2014-07-10 18:18:17 -07:00
|
|
|
~nir_visitor();
|
|
|
|
|
|
|
|
|
|
virtual void visit(ir_variable *);
|
|
|
|
|
virtual void visit(ir_function *);
|
|
|
|
|
virtual void visit(ir_function_signature *);
|
|
|
|
|
virtual void visit(ir_loop *);
|
|
|
|
|
virtual void visit(ir_if *);
|
|
|
|
|
virtual void visit(ir_discard *);
|
2019-09-20 09:27:00 -07:00
|
|
|
virtual void visit(ir_demote *);
|
2014-07-10 18:18:17 -07:00
|
|
|
virtual void visit(ir_loop_jump *);
|
|
|
|
|
virtual void visit(ir_return *);
|
|
|
|
|
virtual void visit(ir_call *);
|
|
|
|
|
virtual void visit(ir_assignment *);
|
|
|
|
|
virtual void visit(ir_emit_vertex *);
|
|
|
|
|
virtual void visit(ir_end_primitive *);
|
|
|
|
|
virtual void visit(ir_expression *);
|
|
|
|
|
virtual void visit(ir_swizzle *);
|
|
|
|
|
virtual void visit(ir_texture *);
|
|
|
|
|
virtual void visit(ir_constant *);
|
|
|
|
|
virtual void visit(ir_dereference_variable *);
|
|
|
|
|
virtual void visit(ir_dereference_record *);
|
|
|
|
|
virtual void visit(ir_dereference_array *);
|
2014-09-07 19:24:15 +12:00
|
|
|
virtual void visit(ir_barrier *);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2015-12-26 10:00:47 -08:00
|
|
|
void create_function(ir_function_signature *ir);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
private:
|
2016-04-04 10:16:11 +02:00
|
|
|
void add_instr(nir_instr *instr, unsigned num_components, unsigned bit_size);
|
2015-10-30 23:32:50 -04:00
|
|
|
nir_ssa_def *evaluate_rvalue(ir_rvalue *ir);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2015-10-30 23:56:49 -04:00
|
|
|
nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def **srcs);
|
|
|
|
|
nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1);
|
|
|
|
|
nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
|
|
|
|
|
nir_ssa_def *src2);
|
|
|
|
|
nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
|
|
|
|
|
nir_ssa_def *src2, nir_ssa_def *src3);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-03-09 09:40:09 -06:00
|
|
|
bool supports_std430;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
nir_shader *shader;
|
|
|
|
|
nir_function_impl *impl;
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder b;
|
2015-10-30 23:47:46 -04:00
|
|
|
nir_ssa_def *result; /* result of the expression tree last visited */
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
nir_deref_instr *evaluate_deref(ir_instruction *ir);
|
2015-04-02 21:24:38 -07:00
|
|
|
|
2018-11-12 13:08:50 -05:00
|
|
|
nir_constant *constant_copy(ir_constant *ir, void *mem_ctx);
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
/* most recent deref instruction created */
|
|
|
|
|
nir_deref_instr *deref;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
/* whether the IR we're operating on is per-function or global */
|
|
|
|
|
bool is_global;
|
|
|
|
|
|
2018-12-10 10:58:43 -08:00
|
|
|
ir_function_signature *sig;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
/* map of ir_variable -> nir_variable */
|
|
|
|
|
struct hash_table *var_table;
|
|
|
|
|
|
|
|
|
|
/* map of ir_function_signature -> nir_function_overload */
|
|
|
|
|
struct hash_table *overload_table;
|
2021-12-28 21:11:07 +08:00
|
|
|
|
|
|
|
|
/* set of nir_variable hold sparse result */
|
|
|
|
|
struct set *sparse_variable_set;
|
|
|
|
|
|
|
|
|
|
void adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
|
|
|
|
|
nir_ssa_def *dest);
|
2023-03-22 13:03:05 -07:00
|
|
|
|
|
|
|
|
const struct gl_constants *consts;
|
2014-07-10 18:18:17 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* This visitor runs before the main visitor, calling create_function() for
|
|
|
|
|
* each function so that the main visitor can resolve forward references in
|
|
|
|
|
* calls.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
class nir_function_visitor : public ir_hierarchical_visitor
|
|
|
|
|
{
|
|
|
|
|
public:
|
|
|
|
|
nir_function_visitor(nir_visitor *v) : visitor(v)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
virtual ir_visitor_status visit_enter(ir_function *);
|
|
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
nir_visitor *visitor;
|
|
|
|
|
};
|
|
|
|
|
|
2019-02-20 17:13:49 +11:00
|
|
|
/* glsl_to_nir can only handle converting certain function paramaters
|
|
|
|
|
* to NIR. This visitor checks for parameters it can't currently handle.
|
|
|
|
|
*/
|
|
|
|
|
class ir_function_param_visitor : public ir_hierarchical_visitor
|
|
|
|
|
{
|
|
|
|
|
public:
|
|
|
|
|
ir_function_param_visitor()
|
|
|
|
|
: unsupported(false)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
virtual ir_visitor_status visit_enter(ir_function_signature *ir)
|
|
|
|
|
{
|
|
|
|
|
|
|
|
|
|
if (ir->is_intrinsic())
|
|
|
|
|
return visit_continue;
|
|
|
|
|
|
|
|
|
|
foreach_in_list(ir_variable, param, &ir->parameters) {
|
|
|
|
|
if (!param->type->is_vector() || !param->type->is_scalar()) {
|
|
|
|
|
unsupported = true;
|
|
|
|
|
return visit_stop;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (param->data.mode == ir_var_function_inout) {
|
|
|
|
|
unsupported = true;
|
|
|
|
|
return visit_stop;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-29 13:51:32 +03:00
|
|
|
if (!glsl_type_is_vector_or_scalar(ir->return_type) &&
|
|
|
|
|
!ir->return_type->is_void()) {
|
|
|
|
|
unsupported = true;
|
|
|
|
|
return visit_stop;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-20 17:13:49 +11:00
|
|
|
return visit_continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool unsupported;
|
|
|
|
|
};
|
|
|
|
|
|
2016-04-28 12:18:34 +01:00
|
|
|
} /* end of anonymous namespace */
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-02-20 17:13:49 +11:00
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
has_unsupported_function_param(exec_list *ir)
|
|
|
|
|
{
|
|
|
|
|
ir_function_param_visitor visitor;
|
|
|
|
|
visit_list_elements(&visitor, ir);
|
|
|
|
|
return visitor.unsupported;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
/* Convert the linked GLSL IR for one shader stage into a NIR shader.
 *
 * On return the GLSL IR for the stage has been freed (sh->ir is NULL) and
 * the resulting nir_shader contains a single, fully inlined main().
 * The returned shader is ralloc'd with a NULL parent; the caller owns it.
 */
nir_shader *
glsl_to_nir(const struct gl_constants *consts,
            const struct gl_shader_program *shader_prog,
            gl_shader_stage stage,
            const nir_shader_compiler_options *options)
{
   struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];

   const struct gl_shader_compiler_options *gl_options =
      &consts->ShaderCompilerOptions[stage];

   /* glsl_to_nir can only handle converting certain function paramaters
    * to NIR. If we find something we can't handle then we get the GLSL IR
    * opts to remove it before we continue on.
    *
    * TODO: add missing glsl ir to nir support and remove this loop.
    */
   while (has_unsupported_function_param(sh->ir)) {
      do_common_optimization(sh->ir, true, gl_options, consts->NativeIntegers);
   }

   nir_shader *shader = nir_shader_create(NULL, stage, options,
                                          &sh->Program->info);

   /* v2 pre-creates a nir_function for every GLSL function so that v1 can
    * resolve forward references when it encounters calls.
    */
   nir_visitor v1(consts, shader);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   /* The GLSL IR won't be needed anymore. */
   ralloc_free(sh->ir);
   sh->ir = NULL;

   nir_validate_shader(shader, "after glsl to nir, before function inline");
   if (should_print_nir(shader)) {
      printf("glsl_to_nir\n");
      nir_print_shader(shader, stdout);
   }

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(shader, nir_lower_variable_initializers, nir_var_all);
   NIR_PASS_V(shader, nir_lower_returns);
   NIR_PASS_V(shader, nir_inline_functions);
   NIR_PASS_V(shader, nir_opt_deref);

   nir_validate_shader(shader, "after function inlining and return lowering");

   /* Now that we have inlined everything remove all of the functions except
    * main().
    */
   foreach_list_typed_safe(nir_function, function, node, &(shader)->functions){
      if (strcmp("main", function->name) != 0) {
         exec_node_remove(&function->node);
      }
   }

   shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
   if (shader_prog->Label)
      shader->info.label = ralloc_strdup(shader, shader_prog->Label);

   shader->info.subgroup_size = SUBGROUP_SIZE_UNIFORM;

   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      /* gl_FragCoord conventions live in gl_program's shader_info (they are
       * set from layout qualifiers / OPTIONs earlier); copy them over.
       */
      shader->info.fs.pixel_center_integer = sh->Program->info.fs.pixel_center_integer;
      shader->info.fs.origin_upper_left = sh->Program->info.fs.origin_upper_left;
      shader->info.fs.advanced_blend_modes = sh->Program->info.fs.advanced_blend_modes;

      /* Any of these variables forces per-sample execution of the shader. */
      nir_foreach_variable_in_shader(var, shader) {
         if (var->data.mode == nir_var_system_value &&
             (var->data.location == SYSTEM_VALUE_SAMPLE_ID ||
              var->data.location == SYSTEM_VALUE_SAMPLE_POS))
            shader->info.fs.uses_sample_shading = true;

         if (var->data.mode == nir_var_shader_in && var->data.sample)
            shader->info.fs.uses_sample_shading = true;

         if (var->data.mode == nir_var_shader_out && var->data.fb_fetch_output)
            shader->info.fs.uses_sample_shading = true;
      }
   }

   return shader;
}
|
|
|
|
|
|
2022-01-07 12:47:08 +10:00
|
|
|
/* Set up a visitor for one shader: cache the driver constants, allocate the
 * lookup tables used during conversion, and clear all per-walk state.
 */
nir_visitor::nir_visitor(const struct gl_constants *consts, nir_shader *shader)
{
   /* Driver configuration. */
   this->consts = consts;
   this->shader = shader;
   this->supports_std430 = consts->UseSTD430AsDefaultPacking;

   /* Lookup tables mapping GLSL IR objects to their NIR counterparts. */
   this->var_table = _mesa_pointer_hash_table_create(NULL);
   this->overload_table = _mesa_pointer_hash_table_create(NULL);
   this->sparse_variable_set = _mesa_pointer_set_create(NULL);

   /* Walk state: we start at global scope with nothing visited yet. */
   this->is_global = true;
   this->result = NULL;
   this->impl = NULL;
   this->deref = NULL;
   this->sig = NULL;
   memset(&this->b, 0, sizeof(this->b));
}
|
|
|
|
|
|
|
|
|
|
/* Release the lookup tables; the nir_shader itself is owned by the caller. */
nir_visitor::~nir_visitor()
{
   _mesa_set_destroy(this->sparse_variable_set, NULL);
   _mesa_hash_table_destroy(this->overload_table, NULL);
   _mesa_hash_table_destroy(this->var_table, NULL);
}
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
/* Visit an instruction and return the deref chain it produced; the visit
 * leaves its most recent deref in this->deref.
 */
nir_deref_instr *
nir_visitor::evaluate_deref(ir_instruction *ir)
{
   ir->accept(this);
   return deref;
}
|
|
|
|
|
|
2018-11-12 13:08:50 -05:00
|
|
|
/* Deep-copy a GLSL ir_constant into a freshly ralloc'd nir_constant.
 *
 * Scalars/vectors land directly in ret->values[]; float-typed matrices get
 * one child nir_constant per column; structs and arrays recurse per element.
 * Returns NULL when ir is NULL (e.g. a variable with no initializer).
 * The result is allocated out of mem_ctx.
 */
nir_constant *
nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
{
   if (ir == NULL)
      return NULL;

   nir_constant *ret = rzalloc(mem_ctx, nir_constant);

   const unsigned rows = ir->type->vector_elements;
   const unsigned cols = ir->type->matrix_columns;
   unsigned i;

   ret->num_elements = 0;
   switch (ir->type->base_type) {
   case GLSL_TYPE_UINT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u32 = ir->value.u[r];

      break;

   case GLSL_TYPE_UINT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u16 = ir->value.u16[r];
      break;

   case GLSL_TYPE_INT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i32 = ir->value.i[r];

      break;

   case GLSL_TYPE_INT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i16 = ir->value.i16[r];
      break;

   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      if (cols > 1) {
         /* Matrix: emit one child constant per column, each holding the
          * column's row values.  GLSL IR stores matrices column-major, so
          * column c's rows start at index c * rows.
          */
         ret->elements = ralloc_array(mem_ctx, nir_constant *, cols);
         ret->num_elements = cols;
         for (unsigned c = 0; c < cols; c++) {
            nir_constant *col_const = rzalloc(mem_ctx, nir_constant);
            col_const->num_elements = 0;
            switch (ir->type->base_type) {
            case GLSL_TYPE_FLOAT:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f32 = ir->value.f[c * rows + r];
               break;

            case GLSL_TYPE_FLOAT16:
               /* f16 values are stored as raw 16-bit patterns in .u16 */
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].u16 = ir->value.f16[c * rows + r];
               break;

            case GLSL_TYPE_DOUBLE:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f64 = ir->value.d[c * rows + r];
               break;

            default:
               unreachable("Cannot get here from the first level switch");
            }
            ret->elements[c] = col_const;
         }
      } else {
         /* Scalar or vector: copy the rows straight into ret->values. */
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f32 = ir->value.f[r];
            break;

         case GLSL_TYPE_FLOAT16:
            /* f16 values are stored as raw 16-bit patterns in .u16 */
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].u16 = ir->value.f16[r];
            break;

         case GLSL_TYPE_DOUBLE:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f64 = ir->value.d[r];
            break;

         default:
            unreachable("Cannot get here from the first level switch");
         }
      }
      break;

   case GLSL_TYPE_UINT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u64 = ir->value.u64[r];
      break;

   case GLSL_TYPE_INT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i64 = ir->value.i64[r];
      break;

   case GLSL_TYPE_BOOL:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].b = ir->value.b[r];

      break;

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_ARRAY:
      /* Aggregate: recurse on each field/element. */
      ret->elements = ralloc_array(mem_ctx, nir_constant *,
                                   ir->type->length);
      ret->num_elements = ir->type->length;

      for (i = 0; i < ir->type->length; i++)
         ret->elements[i] = constant_copy(ir->const_elements[i], mem_ctx);
      break;

   default:
      unreachable("not reached");
   }

   return ret;
}
|
|
|
|
|
|
2021-12-28 21:11:07 +08:00
|
|
|
/* Rewrite a sparse-texture result variable's type from the GLSL struct form
 * ({ code, texel }) to the flat vector type the sparse NIR instruction
 * actually produces, and remember the variable so later derefs of it can be
 * fixed up to match.
 *
 * var_deref must be a direct variable deref; `type` is the GLSL struct type
 * containing the "texel" field and `dest` is the SSA value the sparse
 * instruction produced.
 */
void
nir_visitor::adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
                                    nir_ssa_def *dest)
{
   const glsl_type *texel_type = type->field_type("texel");
   assert(texel_type);

   assert(var_deref->deref_type == nir_deref_type_var);
   nir_variable *var = var_deref->var;

   /* Adjust nir_variable type to align with sparse nir instructions.
    * Because the nir_variable is created with struct type from ir_variable,
    * but sparse nir instructions output with vector dest.
    */
   var->type = glsl_type::get_instance(texel_type->get_base_type()->base_type,
                                       dest->num_components, 1);

   /* Keep the deref's type in sync with the variable's new type. */
   var_deref->type = var->type;

   /* Record the adjusted variable. */
   _mesa_set_add(this->sparse_variable_set, var);
}
|
|
|
|
|
|
2019-10-22 11:29:47 +11:00
|
|
|
static unsigned
|
|
|
|
|
get_nir_how_declared(unsigned how_declared)
|
|
|
|
|
{
|
|
|
|
|
if (how_declared == ir_var_hidden)
|
|
|
|
|
return nir_var_hidden;
|
|
|
|
|
|
|
|
|
|
return nir_var_declared_normally;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_variable *ir)
|
|
|
|
|
{
|
2018-12-10 10:58:43 -08:00
|
|
|
/* FINISHME: inout parameters */
|
|
|
|
|
assert(ir->data.mode != ir_var_function_inout);
|
|
|
|
|
|
|
|
|
|
if (ir->data.mode == ir_var_function_out)
|
|
|
|
|
return;
|
|
|
|
|
|
2017-10-02 00:17:22 -07:00
|
|
|
nir_variable *var = rzalloc(shader, nir_variable);
|
2014-07-10 18:18:17 -07:00
|
|
|
var->type = ir->type;
|
|
|
|
|
var->name = ralloc_strdup(var, ir->name);
|
|
|
|
|
|
2021-11-02 12:46:06 +11:00
|
|
|
var->data.assigned = ir->data.assigned;
|
2017-09-11 16:19:22 +10:00
|
|
|
var->data.always_active_io = ir->data.always_active_io;
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.read_only = ir->data.read_only;
|
|
|
|
|
var->data.centroid = ir->data.centroid;
|
|
|
|
|
var->data.sample = ir->data.sample;
|
2015-10-02 00:01:23 -07:00
|
|
|
var->data.patch = ir->data.patch;
|
2019-10-22 11:29:47 +11:00
|
|
|
var->data.how_declared = get_nir_how_declared(ir->data.how_declared);
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.invariant = ir->data.invariant;
|
2015-02-14 12:10:32 -08:00
|
|
|
var->data.location = ir->data.location;
|
2021-11-02 12:46:06 +11:00
|
|
|
var->data.must_be_shader_input = ir->data.must_be_shader_input;
|
2017-11-07 13:56:08 +11:00
|
|
|
var->data.stream = ir->data.stream;
|
2019-10-25 14:55:06 -04:00
|
|
|
if (ir->data.stream & (1u << 31))
|
|
|
|
|
var->data.stream |= NIR_STREAM_PACKED;
|
2019-10-22 14:54:34 +11:00
|
|
|
|
|
|
|
|
var->data.precision = ir->data.precision;
|
|
|
|
|
var->data.explicit_location = ir->data.explicit_location;
|
2020-03-13 14:18:27 +11:00
|
|
|
var->data.matrix_layout = ir->data.matrix_layout;
|
2019-10-22 14:54:34 +11:00
|
|
|
var->data.from_named_ifc_block = ir->data.from_named_ifc_block;
|
2016-12-05 23:09:18 -08:00
|
|
|
var->data.compact = false;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
switch(ir->data.mode) {
|
|
|
|
|
case ir_var_auto:
|
|
|
|
|
case ir_var_temporary:
|
|
|
|
|
if (is_global)
|
2019-01-15 23:56:29 +01:00
|
|
|
var->data.mode = nir_var_shader_temp;
|
2014-07-10 18:18:17 -07:00
|
|
|
else
|
2019-01-16 00:05:04 +01:00
|
|
|
var->data.mode = nir_var_function_temp;
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_var_function_in:
|
|
|
|
|
case ir_var_const_in:
|
2019-01-16 00:05:04 +01:00
|
|
|
var->data.mode = nir_var_function_temp;
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_var_shader_in:
|
2019-07-31 15:17:17 -05:00
|
|
|
if (shader->info.stage == MESA_SHADER_GEOMETRY &&
|
|
|
|
|
ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
|
2015-09-23 15:40:33 -07:00
|
|
|
/* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
|
|
|
|
|
var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
|
|
|
|
|
var->data.mode = nir_var_system_value;
|
2015-02-14 12:10:32 -08:00
|
|
|
} else {
|
|
|
|
|
var->data.mode = nir_var_shader_in;
|
2016-12-05 23:09:18 -08:00
|
|
|
|
2017-09-14 19:52:38 -07:00
|
|
|
if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
|
2016-12-05 23:09:18 -08:00
|
|
|
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
|
|
|
|
|
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
|
|
|
|
|
var->data.compact = ir->type->without_array()->is_scalar();
|
|
|
|
|
}
|
2019-01-23 01:55:45 -08:00
|
|
|
|
|
|
|
|
if (shader->info.stage > MESA_SHADER_VERTEX &&
|
|
|
|
|
ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
|
|
|
|
|
ir->data.location <= VARYING_SLOT_CULL_DIST1) {
|
|
|
|
|
var->data.compact = ir->type->without_array()->is_scalar();
|
|
|
|
|
}
|
2015-02-14 12:10:32 -08:00
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_var_shader_out:
|
|
|
|
|
var->data.mode = nir_var_shader_out;
|
2017-09-14 19:52:38 -07:00
|
|
|
if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
|
2016-12-05 23:09:18 -08:00
|
|
|
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
|
|
|
|
|
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
|
|
|
|
|
var->data.compact = ir->type->without_array()->is_scalar();
|
|
|
|
|
}
|
2019-01-23 01:55:45 -08:00
|
|
|
|
|
|
|
|
if (shader->info.stage <= MESA_SHADER_GEOMETRY &&
|
|
|
|
|
ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
|
|
|
|
|
ir->data.location <= VARYING_SLOT_CULL_DIST1) {
|
|
|
|
|
var->data.compact = ir->type->without_array()->is_scalar();
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_var_uniform:
|
2018-12-14 11:21:50 -06:00
|
|
|
if (ir->get_interface_type())
|
2019-01-16 00:09:27 +01:00
|
|
|
var->data.mode = nir_var_mem_ubo;
|
2021-09-15 11:41:09 -05:00
|
|
|
else if (ir->type->contains_image() && !ir->data.bindless)
|
2021-10-15 12:58:22 -05:00
|
|
|
var->data.mode = nir_var_image;
|
2018-12-14 11:21:50 -06:00
|
|
|
else
|
|
|
|
|
var->data.mode = nir_var_uniform;
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
2015-05-18 15:47:18 +02:00
|
|
|
case ir_var_shader_storage:
|
2019-01-16 00:11:23 +01:00
|
|
|
var->data.mode = nir_var_mem_ssbo;
|
2015-05-18 15:47:18 +02:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
case ir_var_system_value:
|
|
|
|
|
var->data.mode = nir_var_system_value;
|
|
|
|
|
break;
|
|
|
|
|
|
2022-07-18 16:59:12 -07:00
|
|
|
case ir_var_shader_shared:
|
|
|
|
|
var->data.mode = nir_var_mem_shared;
|
|
|
|
|
break;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2019-11-07 17:54:42 -05:00
|
|
|
unsigned mem_access = 0;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->data.memory_read_only)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_NON_WRITEABLE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->data.memory_write_only)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_NON_READABLE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->data.memory_coherent)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_COHERENT;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->data.memory_volatile)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_VOLATILE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->data.memory_restrict)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_RESTRICT;
|
2019-03-09 09:40:09 -06:00
|
|
|
|
2020-01-14 17:12:06 +11:00
|
|
|
var->interface_type = ir->get_interface_type();
|
|
|
|
|
|
2019-03-09 09:40:09 -06:00
|
|
|
/* For UBO and SSBO variables, we need explicit types */
|
|
|
|
|
if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
|
|
|
|
|
const glsl_type *explicit_ifc_type =
|
|
|
|
|
ir->get_interface_type()->get_explicit_interface_type(supports_std430);
|
|
|
|
|
|
2020-01-14 17:12:06 +11:00
|
|
|
var->interface_type = explicit_ifc_type;
|
|
|
|
|
|
2019-03-09 09:40:09 -06:00
|
|
|
if (ir->type->without_array()->is_interface()) {
|
|
|
|
|
/* If the type contains the interface, wrap the explicit type in the
|
|
|
|
|
* right number of arrays.
|
|
|
|
|
*/
|
2021-10-15 16:26:32 -05:00
|
|
|
var->type = glsl_type_wrap_in_arrays(explicit_ifc_type, ir->type);
|
2019-03-09 09:40:09 -06:00
|
|
|
} else {
|
|
|
|
|
/* Otherwise, this variable is one entry in the interface */
|
2019-04-10 13:10:48 -07:00
|
|
|
UNUSED bool found = false;
|
2019-03-09 09:40:09 -06:00
|
|
|
for (unsigned i = 0; i < explicit_ifc_type->length; i++) {
|
|
|
|
|
const glsl_struct_field *field =
|
|
|
|
|
&explicit_ifc_type->fields.structure[i];
|
|
|
|
|
if (strcmp(ir->name, field->name) != 0)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
var->type = field->type;
|
|
|
|
|
if (field->memory_read_only)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_NON_WRITEABLE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (field->memory_write_only)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_NON_READABLE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (field->memory_coherent)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_COHERENT;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (field->memory_volatile)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_VOLATILE;
|
2019-03-09 09:40:09 -06:00
|
|
|
if (field->memory_restrict)
|
2019-11-07 17:54:42 -05:00
|
|
|
mem_access |= ACCESS_RESTRICT;
|
2019-03-09 09:40:09 -06:00
|
|
|
|
|
|
|
|
found = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
assert(found);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.interpolation = ir->data.interpolation;
|
|
|
|
|
var->data.location_frac = ir->data.location_frac;
|
|
|
|
|
|
|
|
|
|
switch (ir->data.depth_layout) {
|
|
|
|
|
case ir_depth_layout_none:
|
|
|
|
|
var->data.depth_layout = nir_depth_layout_none;
|
|
|
|
|
break;
|
|
|
|
|
case ir_depth_layout_any:
|
|
|
|
|
var->data.depth_layout = nir_depth_layout_any;
|
|
|
|
|
break;
|
|
|
|
|
case ir_depth_layout_greater:
|
|
|
|
|
var->data.depth_layout = nir_depth_layout_greater;
|
|
|
|
|
break;
|
|
|
|
|
case ir_depth_layout_less:
|
|
|
|
|
var->data.depth_layout = nir_depth_layout_less;
|
|
|
|
|
break;
|
|
|
|
|
case ir_depth_layout_unchanged:
|
|
|
|
|
var->data.depth_layout = nir_depth_layout_unchanged;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var->data.index = ir->data.index;
|
2017-06-09 17:59:45 +02:00
|
|
|
var->data.descriptor_set = 0;
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.binding = ir->data.binding;
|
2018-02-23 16:06:30 +01:00
|
|
|
var->data.explicit_binding = ir->data.explicit_binding;
|
2021-11-02 12:46:06 +11:00
|
|
|
var->data.explicit_offset = ir->data.explicit_xfb_offset;
|
2018-02-19 08:41:56 +11:00
|
|
|
var->data.bindless = ir->data.bindless;
|
2015-12-29 21:02:56 +11:00
|
|
|
var->data.offset = ir->data.offset;
|
2019-11-07 17:54:42 -05:00
|
|
|
var->data.access = (gl_access_qualifier)mem_access;
|
2018-08-16 15:11:12 -05:00
|
|
|
|
2019-11-04 17:14:10 -08:00
|
|
|
if (var->type->without_array()->is_image()) {
|
2019-10-25 15:18:32 -04:00
|
|
|
var->data.image.format = ir->data.image_format;
|
2019-11-07 16:53:58 -05:00
|
|
|
} else if (var->data.mode == nir_var_shader_out) {
|
2019-10-25 15:18:32 -04:00
|
|
|
var->data.xfb.buffer = ir->data.xfb_buffer;
|
|
|
|
|
var->data.xfb.stride = ir->data.xfb_stride;
|
|
|
|
|
}
|
2018-08-16 15:11:12 -05:00
|
|
|
|
2016-07-19 20:33:46 -07:00
|
|
|
var->data.fb_fetch_output = ir->data.fb_fetch_output;
|
2017-12-12 17:09:19 +01:00
|
|
|
var->data.explicit_xfb_buffer = ir->data.explicit_xfb_buffer;
|
|
|
|
|
var->data.explicit_xfb_stride = ir->data.explicit_xfb_stride;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
var->num_state_slots = ir->get_num_state_slots();
|
2014-12-02 18:07:13 -08:00
|
|
|
if (var->num_state_slots > 0) {
|
2018-03-23 10:40:02 +00:00
|
|
|
var->state_slots = rzalloc_array(var, nir_state_slot,
|
|
|
|
|
var->num_state_slots);
|
2014-12-02 18:07:13 -08:00
|
|
|
|
|
|
|
|
ir_state_slot *state_slots = ir->get_state_slots();
|
|
|
|
|
for (unsigned i = 0; i < var->num_state_slots; i++) {
|
2020-11-12 16:41:15 -05:00
|
|
|
for (unsigned j = 0; j < 4; j++)
|
2014-12-02 18:07:13 -08:00
|
|
|
var->state_slots[i].tokens[j] = state_slots[i].tokens[j];
|
|
|
|
|
var->state_slots[i].swizzle = state_slots[i].swizzle;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
var->state_slots = NULL;
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2023-03-06 16:26:14 -08:00
|
|
|
/* Values declared const will have ir->constant_value instead of
|
|
|
|
|
* ir->constant_initializer.
|
|
|
|
|
*/
|
|
|
|
|
if (ir->constant_initializer)
|
|
|
|
|
var->constant_initializer = constant_copy(ir->constant_initializer, var);
|
|
|
|
|
else
|
|
|
|
|
var->constant_initializer = constant_copy(ir->constant_value, var);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-01-16 00:05:04 +01:00
|
|
|
if (var->data.mode == nir_var_function_temp)
|
2015-10-09 07:05:11 -07:00
|
|
|
nir_function_impl_add_variable(impl, var);
|
|
|
|
|
else
|
|
|
|
|
nir_shader_add_variable(shader, var);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
_mesa_hash_table_insert(var_table, ir, var);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ir_visitor_status
nir_function_visitor::visit_enter(ir_function *ir)
{
   /* Pre-declare a nir_function for every signature of this GLSL IR
    * function so that forward calls can be resolved later.  The bodies are
    * filled in by a separate pass, so we do not descend into them here.
    */
   foreach_in_list(ir_function_signature, sig, &ir->signatures)
      visitor->create_function(sig);

   /* Signatures are handled above; skip the children of this node. */
   return visit_continue_with_parent;
}
|
|
|
|
|
|
|
|
|
|
void
|
2015-12-26 10:00:47 -08:00
|
|
|
nir_visitor::create_function(ir_function_signature *ir)
|
2014-07-10 18:18:17 -07:00
|
|
|
{
|
2016-08-31 18:38:57 -07:00
|
|
|
if (ir->is_intrinsic())
|
2014-07-10 18:18:17 -07:00
|
|
|
return;
|
|
|
|
|
|
2015-12-26 10:00:47 -08:00
|
|
|
nir_function *func = nir_function_create(shader, ir->function_name());
|
2018-09-06 11:12:24 -07:00
|
|
|
if (strcmp(ir->function_name(), "main") == 0)
|
|
|
|
|
func->is_entrypoint = true;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-12-10 10:58:43 -08:00
|
|
|
func->num_params = ir->parameters.length() +
|
|
|
|
|
(ir->return_type != glsl_type::void_type);
|
|
|
|
|
func->params = ralloc_array(shader, nir_parameter, func->num_params);
|
|
|
|
|
|
|
|
|
|
unsigned np = 0;
|
|
|
|
|
|
|
|
|
|
if (ir->return_type != glsl_type::void_type) {
|
|
|
|
|
/* The return value is a variable deref (basically an out parameter) */
|
|
|
|
|
func->params[np].num_components = 1;
|
|
|
|
|
func->params[np].bit_size = 32;
|
|
|
|
|
np++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
foreach_in_list(ir_variable, param, &ir->parameters) {
|
|
|
|
|
/* FINISHME: pass arrays, structs, etc by reference? */
|
|
|
|
|
assert(param->type->is_vector() || param->type->is_scalar());
|
|
|
|
|
|
|
|
|
|
if (param->data.mode == ir_var_function_in) {
|
|
|
|
|
func->params[np].num_components = param->type->vector_elements;
|
|
|
|
|
func->params[np].bit_size = glsl_get_bit_size(param->type);
|
|
|
|
|
} else {
|
|
|
|
|
func->params[np].num_components = 1;
|
|
|
|
|
func->params[np].bit_size = 32;
|
|
|
|
|
}
|
|
|
|
|
np++;
|
|
|
|
|
}
|
|
|
|
|
assert(np == func->num_params);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2015-12-26 10:00:47 -08:00
|
|
|
_mesa_hash_table_insert(this->overload_table, ir, func);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_function *ir)
|
|
|
|
|
{
|
|
|
|
|
foreach_in_list(ir_function_signature, sig, &ir->signatures)
|
|
|
|
|
sig->accept(this);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_function_signature *ir)
|
|
|
|
|
{
|
2016-08-31 18:38:57 -07:00
|
|
|
if (ir->is_intrinsic())
|
2014-07-10 18:18:17 -07:00
|
|
|
return;
|
|
|
|
|
|
2018-12-10 10:58:43 -08:00
|
|
|
this->sig = ir;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(this->overload_table, ir);
|
|
|
|
|
|
|
|
|
|
assert(entry);
|
2015-12-26 10:00:47 -08:00
|
|
|
nir_function *func = (nir_function *) entry->data;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
if (ir->is_defined) {
|
2015-12-26 10:00:47 -08:00
|
|
|
nir_function_impl *impl = nir_function_impl_create(func);
|
2014-07-10 18:18:17 -07:00
|
|
|
this->impl = impl;
|
|
|
|
|
|
|
|
|
|
this->is_global = false;
|
|
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_init(&b, impl);
|
|
|
|
|
b.cursor = nir_after_cf_list(&impl->body);
|
2018-12-10 10:58:43 -08:00
|
|
|
|
|
|
|
|
unsigned i = (ir->return_type != glsl_type::void_type) ? 1 : 0;
|
|
|
|
|
|
|
|
|
|
foreach_in_list(ir_variable, param, &ir->parameters) {
|
|
|
|
|
nir_variable *var =
|
|
|
|
|
nir_local_variable_create(impl, param->type, param->name);
|
|
|
|
|
|
|
|
|
|
if (param->data.mode == ir_var_function_in) {
|
|
|
|
|
nir_store_var(&b, var, nir_load_param(&b, i), ~0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_mesa_hash_table_insert(var_table, param, var);
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
visit_exec_list(&ir->body, this);
|
|
|
|
|
|
|
|
|
|
this->is_global = true;
|
|
|
|
|
} else {
|
2015-12-26 10:00:47 -08:00
|
|
|
func->impl = NULL;
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_loop *ir)
|
|
|
|
|
{
|
2017-02-15 10:04:47 -08:00
|
|
|
nir_push_loop(&b);
|
2015-10-31 16:31:59 -04:00
|
|
|
visit_exec_list(&ir->body_instructions, this);
|
2017-02-15 10:04:47 -08:00
|
|
|
nir_pop_loop(&b, NULL);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_if *ir)
|
|
|
|
|
{
|
2017-02-15 10:04:47 -08:00
|
|
|
nir_push_if(&b, evaluate_rvalue(ir->condition));
|
2014-07-10 18:18:17 -07:00
|
|
|
visit_exec_list(&ir->then_instructions, this);
|
2017-02-15 10:04:47 -08:00
|
|
|
nir_push_else(&b, NULL);
|
2014-07-10 18:18:17 -07:00
|
|
|
visit_exec_list(&ir->else_instructions, this);
|
2017-02-15 10:04:47 -08:00
|
|
|
nir_pop_if(&b, NULL);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_discard *ir)
|
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* discards aren't treated as control flow, because before we lower them
|
|
|
|
|
* they can appear anywhere in the shader and the stuff after them may still
|
|
|
|
|
* be executed (yay, crazy GLSL rules!). However, after lowering, all the
|
|
|
|
|
* discards will be immediately followed by a return.
|
|
|
|
|
*/
|
|
|
|
|
|
2020-09-07 14:17:14 +01:00
|
|
|
if (ir->condition)
|
|
|
|
|
nir_discard_if(&b, evaluate_rvalue(ir->condition));
|
|
|
|
|
else
|
|
|
|
|
nir_discard(&b);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2019-09-20 09:27:00 -07:00
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_demote *ir)
|
|
|
|
|
{
|
2020-09-07 14:17:14 +01:00
|
|
|
nir_demote(&b);
|
2019-09-20 09:27:00 -07:00
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_emit_vertex *ir)
|
|
|
|
|
{
|
2020-09-07 14:17:14 +01:00
|
|
|
nir_emit_vertex(&b, (unsigned)ir->stream_id());
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_end_primitive *ir)
|
|
|
|
|
{
|
2020-09-07 14:17:14 +01:00
|
|
|
nir_end_primitive(&b, (unsigned)ir->stream_id());
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_loop_jump *ir)
|
|
|
|
|
{
|
|
|
|
|
nir_jump_type type;
|
|
|
|
|
switch (ir->mode) {
|
|
|
|
|
case ir_loop_jump::jump_break:
|
|
|
|
|
type = nir_jump_break;
|
|
|
|
|
break;
|
|
|
|
|
case ir_loop_jump::jump_continue:
|
|
|
|
|
type = nir_jump_continue;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_jump_instr *instr = nir_jump_instr_create(this->shader, type);
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_return *ir)
|
|
|
|
|
{
|
2018-12-10 10:58:43 -08:00
|
|
|
if (ir->value != NULL) {
|
|
|
|
|
nir_deref_instr *ret_deref =
|
|
|
|
|
nir_build_deref_cast(&b, nir_load_param(&b, 0),
|
2019-01-16 00:05:04 +01:00
|
|
|
nir_var_function_temp, ir->value->type, 0);
|
2018-12-10 10:58:43 -08:00
|
|
|
|
|
|
|
|
nir_ssa_def *val = evaluate_rvalue(ir->value);
|
|
|
|
|
nir_store_deref(&b, ret_deref, val, ~0);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
nir_jump_instr *instr = nir_jump_instr_create(this->shader, nir_jump_return);
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2018-11-13 09:45:03 -06:00
|
|
|
static void
|
|
|
|
|
intrinsic_set_std430_align(nir_intrinsic_instr *intrin, const glsl_type *type)
|
|
|
|
|
{
|
|
|
|
|
unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
|
|
|
|
|
unsigned pow2_components = util_next_power_of_two(type->vector_elements);
|
|
|
|
|
nir_intrinsic_set_align(intrin, (bit_size / 8) * pow2_components, 0);
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-04 11:41:25 +02:00
|
|
|
/* Accumulate any qualifiers along the deref chain to get the actual
|
|
|
|
|
* load/store qualifier.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static enum gl_access_qualifier
|
|
|
|
|
deref_get_qualifier(nir_deref_instr *deref)
|
|
|
|
|
{
|
|
|
|
|
nir_deref_path path;
|
|
|
|
|
nir_deref_path_init(&path, deref, NULL);
|
|
|
|
|
|
2019-11-07 16:53:58 -05:00
|
|
|
unsigned qualifiers = path.path[0]->var->data.access;
|
2019-06-04 11:41:25 +02:00
|
|
|
|
|
|
|
|
const glsl_type *parent_type = path.path[0]->type;
|
|
|
|
|
for (nir_deref_instr **cur_ptr = &path.path[1]; *cur_ptr; cur_ptr++) {
|
|
|
|
|
nir_deref_instr *cur = *cur_ptr;
|
|
|
|
|
|
|
|
|
|
if (parent_type->is_interface()) {
|
|
|
|
|
const struct glsl_struct_field *field =
|
|
|
|
|
&parent_type->fields.structure[cur->strct.index];
|
|
|
|
|
if (field->memory_read_only)
|
|
|
|
|
qualifiers |= ACCESS_NON_WRITEABLE;
|
|
|
|
|
if (field->memory_write_only)
|
|
|
|
|
qualifiers |= ACCESS_NON_READABLE;
|
|
|
|
|
if (field->memory_coherent)
|
|
|
|
|
qualifiers |= ACCESS_COHERENT;
|
|
|
|
|
if (field->memory_volatile)
|
|
|
|
|
qualifiers |= ACCESS_VOLATILE;
|
|
|
|
|
if (field->memory_restrict)
|
|
|
|
|
qualifiers |= ACCESS_RESTRICT;
|
|
|
|
|
}
|
2018-09-14 12:57:32 -07:00
|
|
|
|
2019-06-04 11:41:25 +02:00
|
|
|
parent_type = cur->type;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_deref_path_finish(&path);
|
|
|
|
|
|
|
|
|
|
return (gl_access_qualifier) qualifiers;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_call *ir)
|
|
|
|
|
{
|
2016-08-31 18:38:57 -07:00
|
|
|
if (ir->callee->is_intrinsic()) {
|
2014-07-10 18:18:17 -07:00
|
|
|
nir_intrinsic_op op;
|
2016-08-31 18:09:27 -07:00
|
|
|
|
|
|
|
|
switch (ir->callee->intrinsic_id) {
|
2019-03-14 12:00:04 -05:00
|
|
|
case ir_intrinsic_generic_atomic_add:
|
|
|
|
|
op = ir->return_deref->type->is_integer_32_64()
|
|
|
|
|
? nir_intrinsic_deref_atomic_add : nir_intrinsic_deref_atomic_fadd;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_and:
|
|
|
|
|
op = nir_intrinsic_deref_atomic_and;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_or:
|
|
|
|
|
op = nir_intrinsic_deref_atomic_or;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_xor:
|
|
|
|
|
op = nir_intrinsic_deref_atomic_xor;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_min:
|
|
|
|
|
assert(ir->return_deref);
|
2020-09-27 18:33:06 +05:30
|
|
|
if (ir->return_deref->type == glsl_type::int_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::int64_t_type)
|
2019-03-14 12:00:04 -05:00
|
|
|
op = nir_intrinsic_deref_atomic_imin;
|
2020-09-27 18:33:06 +05:30
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::uint64_t_type)
|
2019-03-14 12:00:04 -05:00
|
|
|
op = nir_intrinsic_deref_atomic_umin;
|
|
|
|
|
else if (ir->return_deref->type == glsl_type::float_type)
|
|
|
|
|
op = nir_intrinsic_deref_atomic_fmin;
|
|
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_max:
|
|
|
|
|
assert(ir->return_deref);
|
2020-09-27 18:33:06 +05:30
|
|
|
if (ir->return_deref->type == glsl_type::int_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::int64_t_type)
|
2019-03-14 12:00:04 -05:00
|
|
|
op = nir_intrinsic_deref_atomic_imax;
|
2020-09-27 18:33:06 +05:30
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::uint64_t_type)
|
2019-03-14 12:00:04 -05:00
|
|
|
op = nir_intrinsic_deref_atomic_umax;
|
|
|
|
|
else if (ir->return_deref->type == glsl_type::float_type)
|
|
|
|
|
op = nir_intrinsic_deref_atomic_fmax;
|
|
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_exchange:
|
|
|
|
|
op = nir_intrinsic_deref_atomic_exchange;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_generic_atomic_comp_swap:
|
|
|
|
|
op = ir->return_deref->type->is_integer_32_64()
|
|
|
|
|
? nir_intrinsic_deref_atomic_comp_swap
|
|
|
|
|
: nir_intrinsic_deref_atomic_fcomp_swap;
|
|
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
case ir_intrinsic_atomic_counter_read:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_read_deref;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_increment:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_inc_deref;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_predecrement:
|
nir: Fix OpAtomicCounterIDecrement for uniform atomic counters
From the SPIR-V 1.0 specification, section 3.32.18, "Atomic
Instructions":
"OpAtomicIDecrement:
<skip>
The instruction's result is the Original Value."
However, we were implementing it, for uniform atomic counters, as a
pre-decrement operation, as was the one available from GLSL.
Renamed the former nir intrinsic 'atomic_counter_dec*' to
'atomic_counter_pre_dec*' for clarification purposes, as it implements
a pre-decrement operation as specified for GLSL. From GLSL 4.50 spec,
section 8.10, "Atomic Counter Functions":
"uint atomicCounterDecrement (atomic_uint c)
Atomically
1. decrements the counter for c, and
2. returns the value resulting from the decrement operation.
These two steps are done atomically with respect to the atomic
counter functions in this table."
Added a new nir intrinsic 'atomic_counter_post_dec*' which implements
a post-decrement operation as required by SPIR-V.
v2: (Timothy Arceri)
* Add extra spec quotes on commit message
* Use "post" instead "pos" to avoid confusion with "position"
Signed-off-by: Antia Puentes <apuentes@igalia.com>
Signed-off-by: Alejandro Piñeiro <apinheiro@igalia.com>
Reviewed-by: Timothy Arceri <tarceri@itsqueeze.com>
2018-02-22 13:50:23 +01:00
|
|
|
op = nir_intrinsic_atomic_counter_pre_dec_deref;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
2016-06-29 18:00:22 -07:00
|
|
|
case ir_intrinsic_atomic_counter_add:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_add_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_and:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_and_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_or:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_or_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_xor:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_xor_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_min:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_min_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_max:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_max_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_exchange:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_exchange_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_atomic_counter_comp_swap:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_atomic_counter_comp_swap_deref;
|
2016-06-29 18:00:22 -07:00
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
case ir_intrinsic_image_load:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_load;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_store:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_store;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_add:
|
2018-04-09 18:36:22 -07:00
|
|
|
op = ir->return_deref->type->is_integer_32_64()
|
|
|
|
|
? nir_intrinsic_image_deref_atomic_add
|
|
|
|
|
: nir_intrinsic_image_deref_atomic_fadd;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_min:
|
2019-08-20 22:32:50 -05:00
|
|
|
if (ir->return_deref->type == glsl_type::int_type)
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_imin;
|
|
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type)
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_umin;
|
|
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_max:
|
2019-08-20 22:32:50 -05:00
|
|
|
if (ir->return_deref->type == glsl_type::int_type)
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_imax;
|
|
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type)
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_umax;
|
|
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_and:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_atomic_and;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_or:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_atomic_or;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_xor:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_atomic_xor;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_exchange:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_atomic_exchange;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_comp_swap:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_atomic_comp_swap;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
2019-07-24 12:06:34 +02:00
|
|
|
case ir_intrinsic_image_atomic_inc_wrap:
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_inc_wrap;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_atomic_dec_wrap:
|
|
|
|
|
op = nir_intrinsic_image_deref_atomic_dec_wrap;
|
|
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
case ir_intrinsic_memory_barrier:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_memory_barrier;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_size:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_size;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_image_samples:
|
2018-03-15 09:58:22 -07:00
|
|
|
op = nir_intrinsic_image_deref_samples;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
2021-12-28 14:58:52 +08:00
|
|
|
case ir_intrinsic_image_sparse_load:
|
|
|
|
|
op = nir_intrinsic_image_deref_sparse_load;
|
|
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
case ir_intrinsic_shader_clock:
|
2015-10-07 11:59:26 +01:00
|
|
|
op = nir_intrinsic_shader_clock;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
2018-04-27 14:12:30 +01:00
|
|
|
case ir_intrinsic_begin_invocation_interlock:
|
|
|
|
|
op = nir_intrinsic_begin_invocation_interlock;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_end_invocation_interlock:
|
|
|
|
|
op = nir_intrinsic_end_invocation_interlock;
|
|
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
case ir_intrinsic_group_memory_barrier:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_group_memory_barrier;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_atomic_counter:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_memory_barrier_atomic_counter;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_buffer:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_memory_barrier_buffer;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_image:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_memory_barrier_image;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_shared:
|
2023-02-24 02:25:44 -08:00
|
|
|
op = shader->options->use_scoped_barrier
|
|
|
|
|
? nir_intrinsic_scoped_barrier
|
|
|
|
|
: nir_intrinsic_memory_barrier_shared;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_load:
|
2015-07-28 15:11:37 -07:00
|
|
|
op = nir_intrinsic_load_shared;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_store:
|
2015-07-28 15:17:34 -07:00
|
|
|
op = nir_intrinsic_store_shared;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_add:
|
2018-04-09 18:36:22 -07:00
|
|
|
op = ir->return_deref->type->is_integer_32_64()
|
|
|
|
|
? nir_intrinsic_shared_atomic_add
|
|
|
|
|
: nir_intrinsic_shared_atomic_fadd;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_and:
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_and;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_or:
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_or;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_xor:
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_xor;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_min:
|
2015-10-10 12:25:39 -07:00
|
|
|
assert(ir->return_deref);
|
2020-09-27 18:33:06 +05:30
|
|
|
if (ir->return_deref->type == glsl_type::int_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::int64_t_type)
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_imin;
|
2020-09-27 18:33:06 +05:30
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::uint64_t_type)
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_umin;
|
2018-04-18 13:34:25 -07:00
|
|
|
else if (ir->return_deref->type == glsl_type::float_type)
|
|
|
|
|
op = nir_intrinsic_shared_atomic_fmin;
|
2015-10-10 12:25:39 -07:00
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_max:
|
2015-10-10 12:25:39 -07:00
|
|
|
assert(ir->return_deref);
|
2020-09-27 18:33:06 +05:30
|
|
|
if (ir->return_deref->type == glsl_type::int_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::int64_t_type)
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_imax;
|
2020-09-27 18:33:06 +05:30
|
|
|
else if (ir->return_deref->type == glsl_type::uint_type ||
|
|
|
|
|
ir->return_deref->type == glsl_type::uint64_t_type)
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_umax;
|
2018-04-18 13:34:25 -07:00
|
|
|
else if (ir->return_deref->type == glsl_type::float_type)
|
|
|
|
|
op = nir_intrinsic_shared_atomic_fmax;
|
2015-10-10 12:25:39 -07:00
|
|
|
else
|
|
|
|
|
unreachable("Invalid type");
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_exchange:
|
2015-10-10 12:25:39 -07:00
|
|
|
op = nir_intrinsic_shared_atomic_exchange;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_shared_atomic_comp_swap:
|
2018-04-18 13:34:25 -07:00
|
|
|
op = ir->return_deref->type->is_integer_32_64()
|
|
|
|
|
? nir_intrinsic_shared_atomic_comp_swap
|
|
|
|
|
: nir_intrinsic_shared_atomic_fcomp_swap;
|
2016-08-31 18:09:27 -07:00
|
|
|
break;
|
2017-06-20 22:37:43 -07:00
|
|
|
case ir_intrinsic_vote_any:
|
|
|
|
|
op = nir_intrinsic_vote_any;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_vote_all:
|
|
|
|
|
op = nir_intrinsic_vote_all;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_vote_eq:
|
2017-08-28 17:33:33 -07:00
|
|
|
op = nir_intrinsic_vote_ieq;
|
2017-06-20 22:37:43 -07:00
|
|
|
break;
|
2017-06-22 16:43:24 -07:00
|
|
|
case ir_intrinsic_ballot:
|
|
|
|
|
op = nir_intrinsic_ballot;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_read_invocation:
|
|
|
|
|
op = nir_intrinsic_read_invocation;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_read_first_invocation:
|
|
|
|
|
op = nir_intrinsic_read_first_invocation;
|
|
|
|
|
break;
|
2019-09-20 10:50:37 -07:00
|
|
|
case ir_intrinsic_helper_invocation:
|
|
|
|
|
op = nir_intrinsic_is_helper_invocation;
|
|
|
|
|
break;
|
2021-12-29 15:26:09 +08:00
|
|
|
case ir_intrinsic_is_sparse_texels_resident:
|
|
|
|
|
op = nir_intrinsic_is_sparse_texels_resident;
|
|
|
|
|
break;
|
2016-08-31 18:09:27 -07:00
|
|
|
default:
|
2015-02-10 11:21:47 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
|
2018-10-18 17:55:49 -05:00
|
|
|
nir_ssa_def *ret = &instr->dest.ssa;
|
2015-05-05 23:04:46 +03:00
|
|
|
|
|
|
|
|
switch (op) {
|
2019-03-14 12:00:04 -05:00
|
|
|
case nir_intrinsic_deref_atomic_add:
|
|
|
|
|
case nir_intrinsic_deref_atomic_imin:
|
|
|
|
|
case nir_intrinsic_deref_atomic_umin:
|
|
|
|
|
case nir_intrinsic_deref_atomic_imax:
|
|
|
|
|
case nir_intrinsic_deref_atomic_umax:
|
|
|
|
|
case nir_intrinsic_deref_atomic_and:
|
|
|
|
|
case nir_intrinsic_deref_atomic_or:
|
|
|
|
|
case nir_intrinsic_deref_atomic_xor:
|
|
|
|
|
case nir_intrinsic_deref_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_deref_atomic_comp_swap:
|
|
|
|
|
case nir_intrinsic_deref_atomic_fadd:
|
|
|
|
|
case nir_intrinsic_deref_atomic_fmin:
|
|
|
|
|
case nir_intrinsic_deref_atomic_fmax:
|
|
|
|
|
case nir_intrinsic_deref_atomic_fcomp_swap: {
|
|
|
|
|
int param_count = ir->actual_parameters.length();
|
|
|
|
|
assert(param_count == 2 || param_count == 3);
|
|
|
|
|
|
|
|
|
|
/* Deref */
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_rvalue *rvalue = (ir_rvalue *) param;
|
|
|
|
|
ir_dereference *deref = rvalue->as_dereference();
|
|
|
|
|
ir_swizzle *swizzle = NULL;
|
|
|
|
|
if (!deref) {
|
|
|
|
|
/* We may have a swizzle to pick off a single vec4 component */
|
|
|
|
|
swizzle = rvalue->as_swizzle();
|
|
|
|
|
assert(swizzle && swizzle->type->vector_elements == 1);
|
|
|
|
|
deref = swizzle->val->as_dereference();
|
|
|
|
|
assert(deref);
|
|
|
|
|
}
|
|
|
|
|
nir_deref_instr *nir_deref = evaluate_deref(deref);
|
|
|
|
|
if (swizzle) {
|
|
|
|
|
nir_deref = nir_build_deref_array_imm(&b, nir_deref,
|
|
|
|
|
swizzle->mask.x);
|
|
|
|
|
}
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);
|
|
|
|
|
|
2019-06-04 11:41:25 +02:00
|
|
|
nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));
|
|
|
|
|
|
2019-03-14 12:00:04 -05:00
|
|
|
/* data1 parameter (this is always present) */
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_instruction *inst = (ir_instruction *) param;
|
|
|
|
|
instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
|
|
|
|
|
|
|
|
|
|
/* data2 parameter (only with atomic_comp_swap) */
|
|
|
|
|
if (param_count == 3) {
|
|
|
|
|
assert(op == nir_intrinsic_deref_atomic_comp_swap ||
|
|
|
|
|
op == nir_intrinsic_deref_atomic_fcomp_swap);
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
inst = (ir_instruction *) param;
|
|
|
|
|
instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Atomic result */
|
|
|
|
|
assert(ir->return_deref);
|
2020-09-27 18:33:06 +05:30
|
|
|
if (ir->return_deref->type->is_integer_64()) {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
|
|
|
|
ir->return_deref->type->vector_elements, 64, NULL);
|
|
|
|
|
} else {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
|
|
|
|
ir->return_deref->type->vector_elements, 32, NULL);
|
|
|
|
|
}
|
2019-03-14 12:00:04 -05:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2018-03-15 09:58:22 -07:00
|
|
|
case nir_intrinsic_atomic_counter_read_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_inc_deref:
|
nir: Fix OpAtomicCounterIDecrement for uniform atomic counters
From the SPIR-V 1.0 specification, section 3.32.18, "Atomic
Instructions":
"OpAtomicIDecrement:
<skip>
The instruction's result is the Original Value."
However, we were implementing it, for uniform atomic counters, as a
pre-decrement operation, as was the one available from GLSL.
Renamed the former nir intrinsic 'atomic_counter_dec*' to
'atomic_counter_pre_dec*' for clarification purposes, as it implements
a pre-decrement operation as specified for GLSL. From GLSL 4.50 spec,
section 8.10, "Atomic Counter Functions":
"uint atomicCounterDecrement (atomic_uint c)
Atomically
1. decrements the counter for c, and
2. returns the value resulting from the decrement operation.
These two steps are done atomically with respect to the atomic
counter functions in this table."
Added a new nir intrinsic 'atomic_counter_post_dec*' which implements
a post-decrement operation as required by SPIR-V.
v2: (Timothy Arceri)
* Add extra spec quotes on commit message
* Use "post" instead "pos" to avoid confusion with "position"
Signed-off-by: Antia Puentes <apuentes@igalia.com>
Signed-off-by: Alejandro Piñeiro <apinheiro@igalia.com>
Reviewed-by: Timothy Arceri <tarceri@itsqueeze.com>
2018-02-22 13:50:23 +01:00
|
|
|
case nir_intrinsic_atomic_counter_pre_dec_deref:
|
2018-03-15 09:58:22 -07:00
|
|
|
case nir_intrinsic_atomic_counter_add_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_min_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_max_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_and_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_or_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_xor_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_exchange_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_comp_swap_deref: {
|
2016-06-29 18:00:22 -07:00
|
|
|
/* Set the counter variable dereference. */
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_dereference *counter = (ir_dereference *)param;
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa);
|
2016-06-29 18:00:22 -07:00
|
|
|
param = param->get_next();
|
|
|
|
|
|
|
|
|
|
/* Set the intrinsic destination. */
|
|
|
|
|
if (ir->return_deref) {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Set the intrinsic parameters. */
|
|
|
|
|
if (!param->is_tail_sentinel()) {
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[1] =
|
2016-06-29 18:00:22 -07:00
|
|
|
nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!param->is_tail_sentinel()) {
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[2] =
|
2016-06-29 18:00:22 -07:00
|
|
|
nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-05-05 23:04:46 +03:00
|
|
|
break;
|
|
|
|
|
}
|
2018-03-15 09:58:22 -07:00
|
|
|
case nir_intrinsic_image_deref_load:
|
|
|
|
|
case nir_intrinsic_image_deref_store:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_add:
|
2019-08-20 22:32:50 -05:00
|
|
|
case nir_intrinsic_image_deref_atomic_imin:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_umin:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_imax:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_umax:
|
2018-03-15 09:58:22 -07:00
|
|
|
case nir_intrinsic_image_deref_atomic_and:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_or:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_xor:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_exchange:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_comp_swap:
|
2018-04-09 18:36:22 -07:00
|
|
|
case nir_intrinsic_image_deref_atomic_fadd:
|
2018-03-15 09:58:22 -07:00
|
|
|
case nir_intrinsic_image_deref_samples:
|
2019-07-24 12:06:34 +02:00
|
|
|
case nir_intrinsic_image_deref_size:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_inc_wrap:
|
2021-12-28 14:58:52 +08:00
|
|
|
case nir_intrinsic_image_deref_atomic_dec_wrap:
|
|
|
|
|
case nir_intrinsic_image_deref_sparse_load: {
|
2015-05-05 23:04:46 +03:00
|
|
|
/* Set the image variable dereference. */
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_dereference *image = (ir_dereference *)param;
|
2019-03-28 23:47:07 +01:00
|
|
|
nir_deref_instr *deref = evaluate_deref(image);
|
|
|
|
|
const glsl_type *type = deref->type;
|
2015-05-05 23:04:46 +03:00
|
|
|
|
2019-06-04 11:41:25 +02:00
|
|
|
nir_intrinsic_set_access(instr, deref_get_qualifier(deref));
|
|
|
|
|
|
2019-03-28 23:47:07 +01:00
|
|
|
instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
|
2015-05-05 23:04:46 +03:00
|
|
|
param = param->get_next();
|
2021-07-12 09:37:01 -05:00
|
|
|
nir_intrinsic_set_image_dim(instr,
|
|
|
|
|
(glsl_sampler_dim)type->sampler_dimensionality);
|
|
|
|
|
nir_intrinsic_set_image_array(instr, type->sampler_array);
|
2015-05-05 23:04:46 +03:00
|
|
|
|
2015-08-11 17:42:12 +03:00
|
|
|
/* Set the intrinsic destination. */
|
|
|
|
|
if (ir->return_deref) {
|
2021-12-28 14:58:52 +08:00
|
|
|
unsigned num_components;
|
|
|
|
|
if (op == nir_intrinsic_image_deref_sparse_load) {
|
|
|
|
|
const glsl_type *dest_type =
|
|
|
|
|
ir->return_deref->type->field_type("texel");
|
|
|
|
|
/* One extra component to hold residency code. */
|
|
|
|
|
num_components = dest_type->vector_elements + 1;
|
|
|
|
|
} else
|
|
|
|
|
num_components = ir->return_deref->type->vector_elements;
|
|
|
|
|
|
2015-08-11 17:42:12 +03:00
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
2017-03-02 21:42:06 -08:00
|
|
|
num_components, 32, NULL);
|
2015-08-11 17:42:12 +03:00
|
|
|
}
|
|
|
|
|
|
2018-08-14 14:03:05 -05:00
|
|
|
if (op == nir_intrinsic_image_deref_size) {
|
|
|
|
|
instr->num_components = instr->dest.ssa.num_components;
|
2021-12-28 14:58:52 +08:00
|
|
|
} else if (op == nir_intrinsic_image_deref_load ||
|
|
|
|
|
op == nir_intrinsic_image_deref_sparse_load) {
|
|
|
|
|
instr->num_components = instr->dest.ssa.num_components;
|
2020-09-30 21:20:53 -05:00
|
|
|
nir_intrinsic_set_dest_type(instr,
|
|
|
|
|
nir_get_nir_type_for_glsl_base_type(type->sampled_type));
|
|
|
|
|
} else if (op == nir_intrinsic_image_deref_store) {
|
|
|
|
|
instr->num_components = 4;
|
|
|
|
|
nir_intrinsic_set_src_type(instr,
|
2020-04-13 07:50:37 -07:00
|
|
|
nir_get_nir_type_for_glsl_base_type(type->sampled_type));
|
2018-08-14 14:03:05 -05:00
|
|
|
}
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
if (op == nir_intrinsic_image_deref_size ||
|
|
|
|
|
op == nir_intrinsic_image_deref_samples) {
|
2020-08-19 18:21:33 -05:00
|
|
|
/* image_deref_size takes an LOD parameter which is always 0
|
|
|
|
|
* coming from GLSL.
|
|
|
|
|
*/
|
|
|
|
|
if (op == nir_intrinsic_image_deref_size)
|
|
|
|
|
instr->src[1] = nir_src_for_ssa(nir_imm_int(&b, 0));
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-08-11 17:42:12 +03:00
|
|
|
break;
|
2015-09-23 08:48:21 +02:00
|
|
|
}
|
2015-08-11 17:42:12 +03:00
|
|
|
|
2015-05-05 23:04:46 +03:00
|
|
|
/* Set the address argument, extending the coordinate vector to four
|
|
|
|
|
* components.
|
|
|
|
|
*/
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_ssa_def *src_addr =
|
|
|
|
|
evaluate_rvalue((ir_dereference *)param);
|
|
|
|
|
nir_ssa_def *srcs[4];
|
2015-05-05 23:04:46 +03:00
|
|
|
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
2015-10-31 16:31:59 -04:00
|
|
|
if (i < type->coordinate_components())
|
|
|
|
|
srcs[i] = nir_channel(&b, src_addr, i);
|
|
|
|
|
else
|
2021-07-12 09:36:08 -05:00
|
|
|
srcs[i] = nir_ssa_undef(&b, 1, 32);
|
2015-05-05 23:04:46 +03:00
|
|
|
}
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[1] = nir_src_for_ssa(nir_vec(&b, srcs, 4));
|
2015-05-05 23:04:46 +03:00
|
|
|
param = param->get_next();
|
|
|
|
|
|
|
|
|
|
/* Set the sample argument, which is undefined for single-sample
|
|
|
|
|
* images.
|
|
|
|
|
*/
|
|
|
|
|
if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[2] =
|
2015-10-30 23:32:50 -04:00
|
|
|
nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
|
2015-05-05 23:04:46 +03:00
|
|
|
param = param->get_next();
|
|
|
|
|
} else {
|
2021-07-12 09:36:08 -05:00
|
|
|
instr->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
|
2015-05-05 23:04:46 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Set the intrinsic parameters. */
|
|
|
|
|
if (!param->is_tail_sentinel()) {
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[3] =
|
2015-10-30 23:32:50 -04:00
|
|
|
nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
|
2015-05-05 23:04:46 +03:00
|
|
|
param = param->get_next();
|
2021-12-28 14:58:52 +08:00
|
|
|
} else if (op == nir_intrinsic_image_deref_load ||
|
|
|
|
|
op == nir_intrinsic_image_deref_sparse_load) {
|
2020-01-06 08:27:49 +01:00
|
|
|
instr->src[3] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
|
2015-05-05 23:04:46 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!param->is_tail_sentinel()) {
|
2018-03-15 09:58:22 -07:00
|
|
|
instr->src[4] =
|
2015-10-30 23:32:50 -04:00
|
|
|
nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
|
2015-05-05 23:04:46 +03:00
|
|
|
param = param->get_next();
|
2020-01-06 08:27:49 +01:00
|
|
|
} else if (op == nir_intrinsic_image_deref_store) {
|
|
|
|
|
instr->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
|
2015-05-05 23:04:46 +03:00
|
|
|
}
|
2020-01-06 08:27:49 +01:00
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-05-05 23:04:46 +03:00
|
|
|
break;
|
|
|
|
|
}
|
2015-05-05 23:10:56 +03:00
|
|
|
case nir_intrinsic_memory_barrier:
|
2015-10-10 08:59:42 -07:00
|
|
|
case nir_intrinsic_group_memory_barrier:
|
|
|
|
|
case nir_intrinsic_memory_barrier_atomic_counter:
|
|
|
|
|
case nir_intrinsic_memory_barrier_buffer:
|
|
|
|
|
case nir_intrinsic_memory_barrier_image:
|
|
|
|
|
case nir_intrinsic_memory_barrier_shared:
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-05-05 23:10:56 +03:00
|
|
|
break;
|
2023-02-24 02:25:44 -08:00
|
|
|
case nir_intrinsic_scoped_barrier: {
|
|
|
|
|
/* The nir_intrinsic_scoped_barrier follows the general
|
|
|
|
|
* semantics of SPIR-V memory barriers, so this and other memory
|
|
|
|
|
* barriers use the mapping based on GLSL->SPIR-V from
|
|
|
|
|
*
|
|
|
|
|
* https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_gl_spirv.txt
|
|
|
|
|
*/
|
|
|
|
|
nir_scope scope;
|
|
|
|
|
unsigned modes;
|
|
|
|
|
switch (ir->callee->intrinsic_id) {
|
|
|
|
|
case ir_intrinsic_memory_barrier:
|
|
|
|
|
scope = NIR_SCOPE_DEVICE;
|
|
|
|
|
modes = nir_var_image |
|
|
|
|
|
nir_var_mem_ssbo |
|
|
|
|
|
nir_var_mem_shared |
|
|
|
|
|
nir_var_mem_global;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_buffer:
|
|
|
|
|
scope = NIR_SCOPE_DEVICE;
|
|
|
|
|
modes = nir_var_mem_ssbo |
|
|
|
|
|
nir_var_mem_global;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_image:
|
|
|
|
|
scope = NIR_SCOPE_DEVICE;
|
|
|
|
|
modes = nir_var_image;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_shared:
|
|
|
|
|
/* Both ARB_gl_spirv and glslang lower this to Device scope, so
|
|
|
|
|
* follow their lead. Note GL_KHR_vulkan_glsl also does
|
|
|
|
|
* something similar.
|
|
|
|
|
*/
|
|
|
|
|
scope = NIR_SCOPE_DEVICE;
|
|
|
|
|
modes = nir_var_mem_shared;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_group_memory_barrier:
|
|
|
|
|
scope = NIR_SCOPE_WORKGROUP;
|
|
|
|
|
modes = nir_var_image |
|
|
|
|
|
nir_var_mem_ssbo |
|
|
|
|
|
nir_var_mem_shared |
|
|
|
|
|
nir_var_mem_global;
|
|
|
|
|
break;
|
|
|
|
|
case ir_intrinsic_memory_barrier_atomic_counter:
|
|
|
|
|
/* There's no nir_var_atomic_counter, but since atomic counters are lowered
|
|
|
|
|
* to SSBOs, we use nir_var_mem_ssbo instead.
|
|
|
|
|
*/
|
|
|
|
|
scope = NIR_SCOPE_DEVICE;
|
|
|
|
|
modes = nir_var_mem_ssbo;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("invalid intrinsic id for memory barrier");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_scoped_memory_barrier(&b, scope, NIR_MEMORY_ACQ_REL,
|
|
|
|
|
(nir_variable_mode)modes);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2015-10-07 11:59:26 +01:00
|
|
|
case nir_intrinsic_shader_clock:
|
2017-04-12 21:14:22 +08:00
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32, NULL);
|
2020-05-20 09:54:50 +02:00
|
|
|
nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-10-07 11:59:26 +01:00
|
|
|
break;
|
2018-04-27 14:12:30 +01:00
|
|
|
case nir_intrinsic_begin_invocation_interlock:
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_end_invocation_interlock:
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
2015-07-09 10:26:42 +02:00
|
|
|
case nir_intrinsic_store_ssbo: {
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
|
|
|
|
|
assert(write_mask);
|
|
|
|
|
|
2018-11-13 10:19:25 -06:00
|
|
|
nir_ssa_def *nir_val = evaluate_rvalue(val);
|
2018-10-19 11:14:47 -05:00
|
|
|
if (val->type->is_boolean())
|
|
|
|
|
nir_val = nir_b2i32(&b, nir_val);
|
2018-11-13 10:19:25 -06:00
|
|
|
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(nir_val);
|
2015-11-25 14:14:05 -08:00
|
|
|
instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
|
|
|
|
|
instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
|
2018-11-13 09:45:03 -06:00
|
|
|
intrinsic_set_std430_align(instr, val->type);
|
2016-01-21 13:32:09 -05:00
|
|
|
nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
|
2015-07-09 10:26:42 +02:00
|
|
|
instr->num_components = val->type->vector_elements;
|
|
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2015-07-09 10:26:42 +02:00
|
|
|
break;
|
|
|
|
|
}
|
2015-07-28 15:11:37 -07:00
|
|
|
case nir_intrinsic_load_shared: {
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
2016-01-21 13:32:09 -05:00
|
|
|
nir_intrinsic_set_base(instr, 0);
|
2015-11-25 14:14:05 -08:00
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));
|
2015-07-28 15:11:37 -07:00
|
|
|
|
|
|
|
|
const glsl_type *type = ir->return_deref->var->type;
|
|
|
|
|
instr->num_components = type->vector_elements;
|
2018-11-13 09:45:03 -06:00
|
|
|
intrinsic_set_std430_align(instr, type);
|
2015-07-28 15:11:37 -07:00
|
|
|
|
|
|
|
|
/* Setup destination register */
|
2018-11-13 10:19:25 -06:00
|
|
|
unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
|
2015-07-28 15:11:37 -07:00
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
2015-11-17 13:57:54 +01:00
|
|
|
type->vector_elements, bit_size, NULL);
|
2015-07-28 15:11:37 -07:00
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
2018-10-19 11:14:47 -05:00
|
|
|
|
|
|
|
|
/* The value in shared memory is a 32-bit value */
|
|
|
|
|
if (type->is_boolean())
|
2020-03-27 00:29:14 -05:00
|
|
|
ret = nir_b2b1(&b, &instr->dest.ssa);
|
2015-07-28 15:11:37 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2015-07-28 15:17:34 -07:00
|
|
|
case nir_intrinsic_store_shared: {
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
|
|
|
|
|
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
|
|
|
|
|
assert(write_mask);
|
|
|
|
|
|
2016-01-21 13:32:09 -05:00
|
|
|
nir_intrinsic_set_base(instr, 0);
|
2015-11-25 14:14:05 -08:00
|
|
|
instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));
|
2015-07-28 15:17:34 -07:00
|
|
|
|
2016-01-21 13:32:09 -05:00
|
|
|
nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
|
2015-07-28 15:17:34 -07:00
|
|
|
|
2018-11-13 10:19:25 -06:00
|
|
|
nir_ssa_def *nir_val = evaluate_rvalue(val);
|
2018-10-19 11:14:47 -05:00
|
|
|
/* The value in shared memory is a 32-bit value */
|
|
|
|
|
if (val->type->is_boolean())
|
2020-03-27 00:29:14 -05:00
|
|
|
nir_val = nir_b2b32(&b, nir_val);
|
2018-11-13 10:19:25 -06:00
|
|
|
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(nir_val);
|
2015-07-28 15:17:34 -07:00
|
|
|
instr->num_components = val->type->vector_elements;
|
2018-11-13 09:45:03 -06:00
|
|
|
intrinsic_set_std430_align(instr, val->type);
|
2015-07-28 15:17:34 -07:00
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2015-10-10 12:25:39 -07:00
|
|
|
case nir_intrinsic_shared_atomic_add:
|
|
|
|
|
case nir_intrinsic_shared_atomic_imin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_umin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_imax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_umax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_and:
|
|
|
|
|
case nir_intrinsic_shared_atomic_or:
|
|
|
|
|
case nir_intrinsic_shared_atomic_xor:
|
|
|
|
|
case nir_intrinsic_shared_atomic_exchange:
|
2018-04-09 18:36:22 -07:00
|
|
|
case nir_intrinsic_shared_atomic_comp_swap:
|
2018-04-18 13:34:25 -07:00
|
|
|
case nir_intrinsic_shared_atomic_fadd:
|
|
|
|
|
case nir_intrinsic_shared_atomic_fmin:
|
|
|
|
|
case nir_intrinsic_shared_atomic_fmax:
|
|
|
|
|
case nir_intrinsic_shared_atomic_fcomp_swap: {
|
2015-10-10 12:25:39 -07:00
|
|
|
int param_count = ir->actual_parameters.length();
|
|
|
|
|
assert(param_count == 2 || param_count == 3);
|
|
|
|
|
|
|
|
|
|
/* Offset */
|
|
|
|
|
exec_node *param = ir->actual_parameters.get_head();
|
|
|
|
|
ir_instruction *inst = (ir_instruction *) param;
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
|
|
|
|
|
|
|
|
|
|
/* data1 parameter (this is always present) */
|
|
|
|
|
param = param->get_next();
|
|
|
|
|
inst = (ir_instruction *) param;
|
|
|
|
|
instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
|
|
|
|
|
|
|
|
|
|
/* data2 parameter (only with atomic_comp_swap) */
|
|
|
|
|
if (param_count == 3) {
|
2018-04-18 13:34:25 -07:00
|
|
|
assert(op == nir_intrinsic_shared_atomic_comp_swap ||
|
|
|
|
|
op == nir_intrinsic_shared_atomic_fcomp_swap);
|
2015-10-10 12:25:39 -07:00
|
|
|
param = param->get_next();
|
|
|
|
|
inst = (ir_instruction *) param;
|
|
|
|
|
instr->src[2] =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Atomic result */
|
|
|
|
|
assert(ir->return_deref);
|
2016-05-11 15:05:09 -04:00
|
|
|
unsigned bit_size = glsl_get_bit_size(ir->return_deref->type);
|
2015-10-10 12:25:39 -07:00
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
2015-11-17 13:57:54 +01:00
|
|
|
ir->return_deref->type->vector_elements,
|
|
|
|
|
bit_size, NULL);
|
2015-10-10 12:25:39 -07:00
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2020-06-16 17:46:43 -07:00
|
|
|
case nir_intrinsic_vote_ieq:
|
|
|
|
|
instr->num_components = 1;
|
2021-04-10 17:11:58 +02:00
|
|
|
FALLTHROUGH;
|
2017-06-20 22:37:43 -07:00
|
|
|
case nir_intrinsic_vote_any:
|
2020-06-16 17:46:43 -07:00
|
|
|
case nir_intrinsic_vote_all: {
|
2018-10-19 11:14:47 -05:00
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
|
2017-06-20 22:37:43 -07:00
|
|
|
|
|
|
|
|
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-22 16:43:24 -07:00
|
|
|
|
|
|
|
|
case nir_intrinsic_ballot: {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
|
|
|
|
ir->return_deref->type->vector_elements, 64, NULL);
|
2017-08-22 19:58:59 -07:00
|
|
|
instr->num_components = ir->return_deref->type->vector_elements;
|
2017-06-22 16:43:24 -07:00
|
|
|
|
|
|
|
|
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case nir_intrinsic_read_invocation: {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
|
|
|
|
ir->return_deref->type->vector_elements, 32, NULL);
|
|
|
|
|
instr->num_components = ir->return_deref->type->vector_elements;
|
|
|
|
|
|
|
|
|
|
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
|
|
|
|
|
|
|
|
|
|
ir_rvalue *invocation = (ir_rvalue *) ir->actual_parameters.get_head()->next;
|
|
|
|
|
instr->src[1] = nir_src_for_ssa(evaluate_rvalue(invocation));
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case nir_intrinsic_read_first_invocation: {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest,
|
|
|
|
|
ir->return_deref->type->vector_elements, 32, NULL);
|
|
|
|
|
instr->num_components = ir->return_deref->type->vector_elements;
|
|
|
|
|
|
|
|
|
|
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2019-09-20 10:50:37 -07:00
|
|
|
case nir_intrinsic_is_helper_invocation: {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2021-12-29 15:26:09 +08:00
|
|
|
case nir_intrinsic_is_sparse_texels_resident: {
|
|
|
|
|
nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
|
|
|
|
|
|
|
|
|
|
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
|
|
|
|
|
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &instr->instr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2015-05-05 23:04:46 +03:00
|
|
|
default:
|
|
|
|
|
unreachable("not reached");
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2021-12-28 21:11:07 +08:00
|
|
|
if (ir->return_deref) {
|
|
|
|
|
nir_deref_instr *ret_deref = evaluate_deref(ir->return_deref);
|
|
|
|
|
|
|
|
|
|
if (op == nir_intrinsic_image_deref_sparse_load)
|
|
|
|
|
adjust_sparse_variable(ret_deref, ir->return_deref->type, ret);
|
|
|
|
|
|
|
|
|
|
nir_store_deref(&b, ret_deref, ret, ~0);
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-10 10:58:43 -08:00
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(this->overload_table, ir->callee);
|
|
|
|
|
assert(entry);
|
|
|
|
|
nir_function *callee = (nir_function *) entry->data;
|
|
|
|
|
|
|
|
|
|
nir_call_instr *call = nir_call_instr_create(this->shader, callee);
|
|
|
|
|
|
|
|
|
|
unsigned i = 0;
|
|
|
|
|
nir_deref_instr *ret_deref = NULL;
|
|
|
|
|
if (ir->return_deref) {
|
|
|
|
|
nir_variable *ret_tmp =
|
|
|
|
|
nir_local_variable_create(this->impl, ir->return_deref->type,
|
|
|
|
|
"return_tmp");
|
|
|
|
|
ret_deref = nir_build_deref_var(&b, ret_tmp);
|
|
|
|
|
call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
foreach_two_lists(formal_node, &ir->callee->parameters,
|
|
|
|
|
actual_node, &ir->actual_parameters) {
|
|
|
|
|
ir_rvalue *param_rvalue = (ir_rvalue *) actual_node;
|
|
|
|
|
ir_variable *sig_param = (ir_variable *) formal_node;
|
|
|
|
|
|
|
|
|
|
if (sig_param->data.mode == ir_var_function_out) {
|
|
|
|
|
nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
|
|
|
|
|
call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
|
|
|
|
|
} else if (sig_param->data.mode == ir_var_function_in) {
|
|
|
|
|
nir_ssa_def *val = evaluate_rvalue(param_rvalue);
|
|
|
|
|
nir_src src = nir_src_for_ssa(val);
|
|
|
|
|
|
2021-09-08 16:41:00 +01:00
|
|
|
nir_src_copy(&call->params[i], &src, &call->instr);
|
2018-12-10 10:58:43 -08:00
|
|
|
} else if (sig_param->data.mode == ir_var_function_inout) {
|
|
|
|
|
unreachable("unimplemented: inout parameters");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_builder_instr_insert(&b, &call->instr);
|
|
|
|
|
|
|
|
|
|
if (ir->return_deref)
|
|
|
|
|
nir_store_deref(&b, evaluate_deref(ir->return_deref), nir_load_deref(&b, ret_deref), ~0);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Translate a GLSL IR assignment into NIR.
 *
 * Fast path: when the RHS is a plain dereference or constant and the write
 * mask is either a full mask over the LHS components or 0, emit a single
 * copy_deref.  Otherwise evaluate the RHS to an SSA value and emit a
 * (possibly write-masked) store_deref.
 */
void
nir_visitor::visit(ir_assignment *ir)
{
   unsigned num_components = ir->lhs->type->vector_elements;
   unsigned write_mask = ir->write_mask;

   /* Stores to invariant/precise variables must be emitted exact so later
    * NIR optimizations don't reassociate the computation feeding them.
    */
   b.exact = ir->lhs->variable_referenced()->data.invariant ||
             ir->lhs->variable_referenced()->data.precise;

   if ((ir->rhs->as_dereference() || ir->rhs->as_constant()) &&
       (write_mask == BITFIELD_MASK(num_components) || write_mask == 0)) {
      /* Whole-object copy: no need to lower the RHS to an SSA value. */
      nir_deref_instr *lhs = evaluate_deref(ir->lhs);
      nir_deref_instr *rhs = evaluate_deref(ir->rhs);
      enum gl_access_qualifier lhs_qualifiers = deref_get_qualifier(lhs);
      enum gl_access_qualifier rhs_qualifiers = deref_get_qualifier(rhs);

      nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                 rhs_qualifiers);
      return;
   }

   /* A sparse texture fetch assigns a struct (texel + residency code), so
    * the scalar/vector assertion below is skipped for it.
    */
   ir_texture *tex = ir->rhs->as_texture();
   bool is_sparse = tex && tex->is_sparse;

   if (!is_sparse)
      assert(ir->rhs->type->is_scalar() || ir->rhs->type->is_vector());

   ir->lhs->accept(this);
   nir_deref_instr *lhs_deref = this->deref;
   nir_ssa_def *src = evaluate_rvalue(ir->rhs);

   if (is_sparse) {
      adjust_sparse_variable(lhs_deref, tex->type, src);

      /* correct component and mask because they are 0 for struct */
      num_components = src->num_components;
      write_mask = BITFIELD_MASK(num_components);
   }

   if (write_mask != BITFIELD_MASK(num_components) && write_mask != 0) {
      /* GLSL IR will give us the input to the write-masked assignment in a
       * single packed vector.  So, for example, if the writemask is xzw, then
       * we have to swizzle x -> x, y -> z, and z -> w and get the y component
       * from the load.
       */
      unsigned swiz[4];
      unsigned component = 0;
      for (unsigned i = 0; i < 4; i++) {
         swiz[i] = write_mask & (1 << i) ? component++ : 0;
      }
      src = nir_swizzle(&b, src, swiz, num_components);
   }

   enum gl_access_qualifier qualifiers = deref_get_qualifier(lhs_deref);

   nir_store_deref_with_access(&b, lhs_deref, src, write_mask,
                               qualifiers);
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Given an instruction, returns a pointer to its destination or NULL if there
|
|
|
|
|
* is no destination.
|
|
|
|
|
*
|
|
|
|
|
* Note that this only handles instructions we generate at this level.
|
|
|
|
|
*/
|
|
|
|
|
static nir_dest *
get_instr_dest(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return &nir_instr_as_alu(instr)->dest.dest;

   case nir_instr_type_intrinsic: {
      /* Some intrinsics (e.g. barriers, stores) have no destination. */
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
         return &intrin->dest;
      return NULL;
   }

   case nir_instr_type_tex:
      return &nir_instr_as_tex(instr)->dest;

   default:
      unreachable("not reached");
   }

   return NULL;
}
|
|
|
|
|
|
|
|
|
|
void
|
2016-04-04 10:16:11 +02:00
|
|
|
nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
|
|
|
|
|
unsigned bit_size)
|
2014-07-10 18:18:17 -07:00
|
|
|
{
|
|
|
|
|
nir_dest *dest = get_instr_dest(instr);
|
|
|
|
|
|
2015-05-19 09:02:06 +02:00
|
|
|
if (dest)
|
2016-04-04 10:16:11 +02:00
|
|
|
nir_ssa_dest_init(instr, dest, num_components, bit_size, NULL);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
nir_builder_instr_insert(&b, instr);
|
2015-10-30 23:47:46 -04:00
|
|
|
|
|
|
|
|
if (dest) {
|
|
|
|
|
assert(dest->is_ssa);
|
|
|
|
|
this->result = &dest->ssa;
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2015-10-30 23:32:50 -04:00
|
|
|
nir_ssa_def *
|
2014-07-10 18:18:17 -07:00
|
|
|
nir_visitor::evaluate_rvalue(ir_rvalue* ir)
|
|
|
|
|
{
|
|
|
|
|
ir->accept(this);
|
|
|
|
|
if (ir->as_dereference() || ir->as_constant()) {
|
|
|
|
|
/*
|
|
|
|
|
* A dereference is being used on the right hand side, which means we
|
|
|
|
|
* must emit a variable load.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-06-04 11:41:25 +02:00
|
|
|
enum gl_access_qualifier access = deref_get_qualifier(this->deref);
|
|
|
|
|
this->result = nir_load_deref_with_access(&b, this->deref, access);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2015-10-30 23:47:46 -04:00
|
|
|
return this->result;
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2015-11-12 11:18:50 +01:00
|
|
|
static bool
|
|
|
|
|
type_is_float(glsl_base_type type)
|
|
|
|
|
{
|
2017-07-01 07:46:41 +02:00
|
|
|
return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE ||
|
|
|
|
|
type == GLSL_TYPE_FLOAT16;
|
2015-11-12 11:18:50 +01:00
|
|
|
}
|
|
|
|
|
|
2016-09-02 18:46:55 -07:00
|
|
|
static bool
|
|
|
|
|
type_is_signed(glsl_base_type type)
|
|
|
|
|
{
|
2017-07-01 07:46:41 +02:00
|
|
|
return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64 ||
|
|
|
|
|
type == GLSL_TYPE_INT16;
|
2016-09-02 18:46:55 -07:00
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_expression *ir)
|
|
|
|
|
{
|
2014-12-04 12:16:33 -08:00
|
|
|
/* Some special cases */
|
|
|
|
|
switch (ir->operation) {
|
|
|
|
|
case ir_unop_interpolate_at_centroid:
|
|
|
|
|
case ir_binop_interpolate_at_offset:
|
|
|
|
|
case ir_binop_interpolate_at_sample: {
|
|
|
|
|
ir_dereference *deref = ir->operands[0]->as_dereference();
|
|
|
|
|
ir_swizzle *swizzle = NULL;
|
|
|
|
|
if (!deref) {
|
|
|
|
|
/* the api does not allow a swizzle here, but the varying packing code
|
|
|
|
|
* may have pushed one into here.
|
|
|
|
|
*/
|
|
|
|
|
swizzle = ir->operands[0]->as_swizzle();
|
|
|
|
|
assert(swizzle);
|
|
|
|
|
deref = swizzle->val->as_dereference();
|
|
|
|
|
assert(deref);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
deref->accept(this);
|
|
|
|
|
|
2022-10-17 13:28:19 +11:00
|
|
|
assert(nir_deref_mode_is(this->deref, nir_var_shader_in));
|
2014-12-04 12:16:33 -08:00
|
|
|
nir_intrinsic_op op;
|
2022-10-17 13:28:19 +11:00
|
|
|
switch (ir->operation) {
|
|
|
|
|
case ir_unop_interpolate_at_centroid:
|
|
|
|
|
op = nir_intrinsic_interp_deref_at_centroid;
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_interpolate_at_offset:
|
|
|
|
|
op = nir_intrinsic_interp_deref_at_offset;
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_interpolate_at_sample:
|
|
|
|
|
op = nir_intrinsic_interp_deref_at_sample;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid interpolation intrinsic");
|
2014-12-04 12:16:33 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op);
|
|
|
|
|
intrin->num_components = deref->type->vector_elements;
|
2018-03-15 09:58:22 -07:00
|
|
|
intrin->src[0] = nir_src_for_ssa(&this->deref->dest.ssa);
|
2014-12-04 12:16:33 -08:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
|
|
|
|
|
intrin->intrinsic == nir_intrinsic_interp_deref_at_sample)
|
|
|
|
|
intrin->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1]));
|
2014-12-04 12:16:33 -08:00
|
|
|
|
2016-05-11 15:05:09 -04:00
|
|
|
unsigned bit_size = glsl_get_bit_size(deref->type);
|
2016-04-04 10:16:11 +02:00
|
|
|
add_instr(&intrin->instr, deref->type->vector_elements, bit_size);
|
2014-12-04 12:16:33 -08:00
|
|
|
|
|
|
|
|
if (swizzle) {
|
2015-10-31 16:31:59 -04:00
|
|
|
unsigned swiz[4] = {
|
|
|
|
|
swizzle->mask.x, swizzle->mask.y, swizzle->mask.z, swizzle->mask.w
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
result = nir_swizzle(&b, result, swiz,
|
2019-05-06 10:23:26 -05:00
|
|
|
swizzle->type->vector_elements);
|
2014-12-04 12:16:33 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-14 12:00:04 -05:00
|
|
|
case ir_unop_ssbo_unsized_array_length: {
|
|
|
|
|
nir_intrinsic_instr *intrin =
|
|
|
|
|
nir_intrinsic_instr_create(b.shader,
|
|
|
|
|
nir_intrinsic_deref_buffer_array_length);
|
|
|
|
|
|
|
|
|
|
ir_dereference *deref = ir->operands[0]->as_dereference();
|
|
|
|
|
intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);
|
|
|
|
|
|
|
|
|
|
add_instr(&intrin->instr, 1, 32);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-02 11:33:48 +11:00
|
|
|
case ir_binop_ubo_load:
|
|
|
|
|
/* UBO loads should only have been lowered in GLSL IR for non-nir drivers,
|
|
|
|
|
* NIR drivers make use of gl_nir_lower_buffers() instead.
|
|
|
|
|
*/
|
|
|
|
|
unreachable("Invalid operation nir doesn't want lowered ubo loads");
|
2014-12-04 12:16:33 -08:00
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-30 23:56:49 -04:00
|
|
|
nir_ssa_def *srcs[4];
|
2017-08-09 13:34:02 +10:00
|
|
|
for (unsigned i = 0; i < ir->num_operands; i++)
|
2015-10-30 23:56:49 -04:00
|
|
|
srcs[i] = evaluate_rvalue(ir->operands[i]);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
glsl_base_type types[4];
|
2017-08-09 13:34:02 +10:00
|
|
|
for (unsigned i = 0; i < ir->num_operands; i++)
|
2019-05-05 11:39:08 +02:00
|
|
|
types[i] = ir->operands[i]->type->base_type;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-05-05 11:39:08 +02:00
|
|
|
glsl_base_type out_type = ir->type->base_type;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
switch (ir->operation) {
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_bit_not: result = nir_inot(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_logic_not:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_inot(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_neg:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fneg(&b, srcs[0])
|
|
|
|
|
: nir_ineg(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_abs:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fabs(&b, srcs[0])
|
|
|
|
|
: nir_iabs(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_unop_clz:
|
|
|
|
|
result = nir_uclz(&b, srcs[0]);
|
|
|
|
|
break;
|
2014-10-15 16:19:26 -07:00
|
|
|
case ir_unop_saturate:
|
2015-11-12 11:18:50 +01:00
|
|
|
assert(type_is_float(types[0]));
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fsat(&b, srcs[0]);
|
2014-10-15 16:19:26 -07:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_sign:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fsign(&b, srcs[0])
|
|
|
|
|
: nir_isign(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_rcp: result = nir_frcp(&b, srcs[0]); break;
|
2023-03-22 13:03:05 -07:00
|
|
|
|
|
|
|
|
case ir_unop_rsq:
|
|
|
|
|
if (consts->ForceGLSLAbsSqrt)
|
|
|
|
|
srcs[0] = nir_fabs(&b, srcs[0]);
|
|
|
|
|
result = nir_frsq(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_unop_sqrt:
|
|
|
|
|
if (consts->ForceGLSLAbsSqrt)
|
|
|
|
|
srcs[0] = nir_fabs(&b, srcs[0]);
|
|
|
|
|
result = nir_fsqrt(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2022-05-31 13:36:15 -07:00
|
|
|
case ir_unop_exp: result = nir_fexp2(&b, nir_fmul_imm(&b, srcs[0], M_LOG2E)); break;
|
|
|
|
|
case ir_unop_log: result = nir_fmul_imm(&b, nir_flog2(&b, srcs[0]), 1.0 / M_LOG2E); break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_exp2: result = nir_fexp2(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_log2: result = nir_flog2(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_i2f:
|
|
|
|
|
case ir_unop_u2f:
|
|
|
|
|
case ir_unop_b2f:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_f2i:
|
|
|
|
|
case ir_unop_f2u:
|
|
|
|
|
case ir_unop_f2b:
|
|
|
|
|
case ir_unop_i2b:
|
|
|
|
|
case ir_unop_b2i:
|
|
|
|
|
case ir_unop_b2i64:
|
|
|
|
|
case ir_unop_d2f:
|
|
|
|
|
case ir_unop_f2d:
|
2019-04-19 15:36:00 +02:00
|
|
|
case ir_unop_f162f:
|
|
|
|
|
case ir_unop_f2f16:
|
2019-05-16 13:25:28 +02:00
|
|
|
case ir_unop_f162b:
|
|
|
|
|
case ir_unop_b2f16:
|
2020-05-08 22:16:42 -04:00
|
|
|
case ir_unop_i2i:
|
|
|
|
|
case ir_unop_u2u:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_d2i:
|
|
|
|
|
case ir_unop_d2u:
|
|
|
|
|
case ir_unop_d2b:
|
2016-01-12 14:03:08 +01:00
|
|
|
case ir_unop_i2d:
|
|
|
|
|
case ir_unop_u2d:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_i642i:
|
|
|
|
|
case ir_unop_i642u:
|
|
|
|
|
case ir_unop_i642f:
|
|
|
|
|
case ir_unop_i642b:
|
|
|
|
|
case ir_unop_i642d:
|
|
|
|
|
case ir_unop_u642i:
|
|
|
|
|
case ir_unop_u642u:
|
|
|
|
|
case ir_unop_u642f:
|
|
|
|
|
case ir_unop_u642d:
|
|
|
|
|
case ir_unop_i2i64:
|
|
|
|
|
case ir_unop_u2i64:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_f2i64:
|
|
|
|
|
case ir_unop_d2i64:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_i2u64:
|
|
|
|
|
case ir_unop_u2u64:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_f2u64:
|
|
|
|
|
case ir_unop_d2u64:
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_i2u:
|
|
|
|
|
case ir_unop_u2i:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_i642u64:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_u642i64: {
|
|
|
|
|
nir_alu_type src_type = nir_get_nir_type_for_glsl_base_type(types[0]);
|
|
|
|
|
nir_alu_type dst_type = nir_get_nir_type_for_glsl_base_type(out_type);
|
2022-11-01 18:11:54 -07:00
|
|
|
result = nir_type_convert(&b, srcs[0], src_type, dst_type,
|
|
|
|
|
nir_rounding_mode_undef);
|
2017-03-07 19:54:37 -08:00
|
|
|
/* b2i and b2f don't have fixed bit-size versions so the builder will
|
|
|
|
|
* just assume 32 and we have to fix it up here.
|
|
|
|
|
*/
|
|
|
|
|
result->bit_size = nir_alu_type_get_type_size(dst_type);
|
2017-03-07 16:46:44 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-09 13:07:40 +02:00
|
|
|
case ir_unop_f2fmp: {
|
|
|
|
|
result = nir_build_alu(&b, nir_op_f2fmp, srcs[0], NULL, NULL, NULL);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-08 22:16:42 -04:00
|
|
|
case ir_unop_i2imp: {
|
|
|
|
|
result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case ir_unop_u2ump: {
|
2020-09-04 01:51:49 -04:00
|
|
|
result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
|
2020-05-08 22:16:42 -04:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_bitcast_i2f:
|
|
|
|
|
case ir_unop_bitcast_f2i:
|
|
|
|
|
case ir_unop_bitcast_u2f:
|
|
|
|
|
case ir_unop_bitcast_f2u:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_bitcast_i642d:
|
|
|
|
|
case ir_unop_bitcast_d2i64:
|
|
|
|
|
case ir_unop_bitcast_u642d:
|
|
|
|
|
case ir_unop_bitcast_d2u64:
|
2015-07-21 14:22:11 +10:00
|
|
|
case ir_unop_subroutine_to_int:
|
2014-07-10 18:18:17 -07:00
|
|
|
/* no-op */
|
2019-05-06 11:45:46 -05:00
|
|
|
result = nir_mov(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_trunc: result = nir_ftrunc(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_ceil: result = nir_fceil(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_fract: result = nir_ffract(&b, srcs[0]); break;
|
2018-03-20 13:06:23 +11:00
|
|
|
case ir_unop_frexp_exp: result = nir_frexp_exp(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_frexp_sig: result = nir_frexp_sig(&b, srcs[0]); break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_round_even: result = nir_fround_even(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_sin: result = nir_fsin(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_cos: result = nir_fcos(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdx: result = nir_fddx(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy: result = nir_fddy(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdx_fine: result = nir_fddx_fine(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy_fine: result = nir_fddy_fine(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdx_coarse: result = nir_fddx_coarse(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy_coarse: result = nir_fddy_coarse(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_pack_snorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_snorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_snorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_snorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_unorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_unorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_unorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_unorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_half_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_half_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_snorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_snorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_snorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_snorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_unorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_unorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_unorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_unorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_half_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_half_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2018-01-19 15:11:16 +11:00
|
|
|
case ir_unop_pack_sampler_2x32:
|
|
|
|
|
case ir_unop_pack_image_2x32:
|
2015-08-14 12:20:37 -07:00
|
|
|
case ir_unop_pack_double_2x32:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_pack_int_2x32:
|
|
|
|
|
case ir_unop_pack_uint_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
result = nir_pack_64_2x32(&b, srcs[0]);
|
2016-09-01 15:21:04 -07:00
|
|
|
break;
|
2018-01-19 15:11:16 +11:00
|
|
|
case ir_unop_unpack_sampler_2x32:
|
|
|
|
|
case ir_unop_unpack_image_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
case ir_unop_unpack_double_2x32:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_unpack_int_2x32:
|
|
|
|
|
case ir_unop_unpack_uint_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
result = nir_unpack_64_2x32(&b, srcs[0]);
|
2016-09-01 15:21:04 -07:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_bitfield_reverse:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_bitfield_reverse(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2014-11-07 10:59:16 -08:00
|
|
|
case ir_unop_bit_count:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_bit_count(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
case ir_unop_find_msb:
|
|
|
|
|
switch (types[0]) {
|
|
|
|
|
case GLSL_TYPE_UINT:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_ufind_msb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
case GLSL_TYPE_INT:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_ifind_msb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid type for findMSB()");
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case ir_unop_find_lsb:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_find_lsb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
|
2015-06-01 09:44:55 +02:00
|
|
|
case ir_unop_get_buffer_size: {
|
|
|
|
|
nir_intrinsic_instr *load = nir_intrinsic_instr_create(
|
|
|
|
|
this->shader,
|
2020-09-22 03:24:45 -05:00
|
|
|
nir_intrinsic_get_ssbo_size);
|
2015-06-01 09:44:55 +02:00
|
|
|
load->num_components = ir->type->vector_elements;
|
2015-10-30 23:32:50 -04:00
|
|
|
load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0]));
|
2016-05-11 15:05:09 -04:00
|
|
|
unsigned bit_size = glsl_get_bit_size(ir->type);
|
2016-04-04 10:16:11 +02:00
|
|
|
add_instr(&load->instr, ir->type->vector_elements, bit_size);
|
2015-06-01 09:44:55 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-11 16:02:25 +02:00
|
|
|
case ir_unop_atan:
|
|
|
|
|
result = nir_atan(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_add:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_iadd(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_binop_add_sat:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_iadd_sat(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uadd_sat(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_sub:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fsub(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_isub(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_binop_sub_sat:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_isub_sat(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_usub_sat(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_abs_sub:
|
|
|
|
|
/* out_type is always unsigned for ir_binop_abs_sub, so we have to key
|
|
|
|
|
* on the type of the sources.
|
|
|
|
|
*/
|
|
|
|
|
result = type_is_signed(types[0]) ? nir_uabs_isub(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uabs_usub(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_avg:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_ihadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uhadd(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_avg_round:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_irhadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_urhadd(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_mul_32x16:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_imul_32x16(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umul_32x16(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_mul:
|
2019-02-27 14:02:54 -08:00
|
|
|
if (type_is_float(out_type))
|
|
|
|
|
result = nir_fmul(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (out_type == GLSL_TYPE_INT64 &&
|
|
|
|
|
(ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
|
|
|
|
|
ir->operands[1]->type->base_type == GLSL_TYPE_INT))
|
|
|
|
|
result = nir_imul_2x32_64(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (out_type == GLSL_TYPE_UINT64 &&
|
|
|
|
|
(ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
|
|
|
|
|
ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
|
|
|
|
|
result = nir_umul_2x32_64(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_imul(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_div:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fdiv(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_idiv(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_udiv(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_mod:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fmod(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umod(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_min:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fmin(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_imin(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_umin(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_max:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fmax(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_imax(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_umax(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_pow: result = nir_fpow(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_and: result = nir_iand(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_or: result = nir_ior(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_xor: result = nir_ixor(&b, srcs[0], srcs[1]); break;
|
2015-03-23 16:04:41 -07:00
|
|
|
case ir_binop_logic_and:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_iand(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2015-03-23 16:04:41 -07:00
|
|
|
case ir_binop_logic_or:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_ior(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2015-11-14 17:47:33 -08:00
|
|
|
case ir_binop_logic_xor:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_ixor(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2020-09-03 09:27:58 -04:00
|
|
|
case ir_binop_lshift: result = nir_ishl(&b, srcs[0], nir_u2u32(&b, srcs[1])); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_rshift:
|
2020-09-03 09:27:58 -04:00
|
|
|
result = (type_is_signed(out_type)) ? nir_ishr(&b, srcs[0], nir_u2u32(&b, srcs[1]))
|
|
|
|
|
: nir_ushr(&b, srcs[0], nir_u2u32(&b, srcs[1]));
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_imul_high:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = (out_type == GLSL_TYPE_INT) ? nir_imul_high(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umul_high(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_binop_carry: result = nir_uadd_carry(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_borrow: result = nir_usub_borrow(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_less:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_flt(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (type_is_signed(types[0]))
|
|
|
|
|
result = nir_ilt(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_ult(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_gequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_fge(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (type_is_signed(types[0]))
|
|
|
|
|
result = nir_ige(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_uge(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_equal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_feq(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_ieq(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_nequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
2020-08-18 19:51:57 +02:00
|
|
|
result = nir_fneu(&b, srcs[0], srcs[1]);
|
2018-12-18 23:31:30 -05:00
|
|
|
else
|
|
|
|
|
result = nir_ine(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_all_equal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0])) {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
|
|
|
|
case 1: result = nir_feq(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_ball_fequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_ball_fequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_ball_fequal4(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2018-12-18 23:31:30 -05:00
|
|
|
case 1: result = nir_ieq(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_ball_iequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_ball_iequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_ball_iequal4(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_any_nequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0])) {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2020-08-18 19:51:57 +02:00
|
|
|
case 1: result = nir_fneu(&b, srcs[0], srcs[1]); break;
|
2018-12-18 23:31:30 -05:00
|
|
|
case 2: result = nir_bany_fnequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_bany_fnequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_bany_fnequal4(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2018-12-18 23:31:30 -05:00
|
|
|
case 1: result = nir_ine(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_bany_inequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_bany_inequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_bany_inequal4(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_dot:
|
2021-01-13 15:11:57 +00:00
|
|
|
result = nir_fdot(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2023-02-22 13:47:37 -08:00
|
|
|
|
2022-01-14 13:44:31 -08:00
|
|
|
case ir_binop_vector_extract:
|
|
|
|
|
result = nir_vector_extract(&b, srcs[0], srcs[1]);
|
2018-05-09 15:17:59 +00:00
|
|
|
break;
|
2023-02-22 13:47:37 -08:00
|
|
|
case ir_triop_vector_insert:
|
|
|
|
|
result = nir_vector_insert(&b, srcs[0], srcs[1], srcs[2]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-10-11 16:02:25 +02:00
|
|
|
case ir_binop_atan2:
|
|
|
|
|
result = nir_atan2(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_binop_ldexp: result = nir_ldexp(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_triop_fma:
|
|
|
|
|
result = nir_ffma(&b, srcs[0], srcs[1], srcs[2]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_triop_lrp:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_flrp(&b, srcs[0], srcs[1], srcs[2]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_triop_csel:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_bcsel(&b, srcs[0], srcs[1], srcs[2]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_triop_bitfield_extract:
|
2020-07-18 10:02:11 -07:00
|
|
|
result = ir->type->is_int_16_32() ?
|
|
|
|
|
nir_ibitfield_extract(&b, nir_i2i32(&b, srcs[0]), nir_i2i32(&b, srcs[1]), nir_i2i32(&b, srcs[2])) :
|
|
|
|
|
nir_ubitfield_extract(&b, nir_u2u32(&b, srcs[0]), nir_i2i32(&b, srcs[1]), nir_i2i32(&b, srcs[2]));
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
if (ir->type->base_type == GLSL_TYPE_INT16) {
|
|
|
|
|
result = nir_i2i16(&b, result);
|
|
|
|
|
} else if (ir->type->base_type == GLSL_TYPE_UINT16) {
|
|
|
|
|
result = nir_u2u16(&b, result);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_quadop_bitfield_insert:
|
2020-07-18 10:02:11 -07:00
|
|
|
result = nir_bitfield_insert(&b,
|
|
|
|
|
nir_u2u32(&b, srcs[0]), nir_u2u32(&b, srcs[1]),
|
|
|
|
|
nir_i2i32(&b, srcs[2]), nir_i2i32(&b, srcs[3]));
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
if (ir->type->base_type == GLSL_TYPE_INT16) {
|
|
|
|
|
result = nir_i2i16(&b, result);
|
|
|
|
|
} else if (ir->type->base_type == GLSL_TYPE_UINT16) {
|
|
|
|
|
result = nir_u2u16(&b, result);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_quadop_vector:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_vec(&b, srcs, ir->type->vector_elements);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
/* The bit-size of the NIR SSA value must match the bit-size of the
|
|
|
|
|
* original GLSL IR expression.
|
|
|
|
|
*/
|
|
|
|
|
assert(result->bit_size == glsl_base_type_get_bit_size(ir->type->base_type));
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_swizzle *ir)
|
|
|
|
|
{
|
|
|
|
|
unsigned swizzle[4] = { ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w };
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_swizzle(&b, evaluate_rvalue(ir->val), swizzle,
|
2019-05-06 10:23:26 -05:00
|
|
|
ir->type->vector_elements);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_texture *ir)
|
|
|
|
|
{
|
|
|
|
|
unsigned num_srcs;
|
|
|
|
|
nir_texop op;
|
|
|
|
|
switch (ir->op) {
|
|
|
|
|
case ir_tex:
|
|
|
|
|
op = nir_texop_tex;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txb:
|
|
|
|
|
case ir_txl:
|
|
|
|
|
op = (ir->op == ir_txb) ? nir_texop_txb : nir_texop_txl;
|
|
|
|
|
num_srcs = 2; /* coordinate, bias/lod */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txd:
|
|
|
|
|
op = nir_texop_txd; /* coordinate, dPdx, dPdy */
|
|
|
|
|
num_srcs = 3;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf:
|
|
|
|
|
op = nir_texop_txf;
|
|
|
|
|
if (ir->lod_info.lod != NULL)
|
|
|
|
|
num_srcs = 2; /* coordinate, lod */
|
|
|
|
|
else
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf_ms:
|
|
|
|
|
op = nir_texop_txf_ms;
|
|
|
|
|
num_srcs = 2; /* coordinate, sample_index */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txs:
|
|
|
|
|
op = nir_texop_txs;
|
|
|
|
|
if (ir->lod_info.lod != NULL)
|
|
|
|
|
num_srcs = 1; /* lod */
|
|
|
|
|
else
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_lod:
|
|
|
|
|
op = nir_texop_lod;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_tg4:
|
|
|
|
|
op = nir_texop_tg4;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_query_levels:
|
|
|
|
|
op = nir_texop_query_levels;
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
2015-08-27 23:05:03 -04:00
|
|
|
case ir_texture_samples:
|
|
|
|
|
op = nir_texop_texture_samples;
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
2015-11-17 17:09:09 -08:00
|
|
|
case ir_samples_identical:
|
|
|
|
|
op = nir_texop_samples_identical;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ir->projector != NULL)
|
|
|
|
|
num_srcs++;
|
2016-12-12 08:32:38 -05:00
|
|
|
if (ir->shadow_comparator != NULL)
|
2014-07-10 18:18:17 -07:00
|
|
|
num_srcs++;
|
2019-03-18 21:23:59 +01:00
|
|
|
/* offsets are constants we store inside nir_tex_intrs.offsets */
|
|
|
|
|
if (ir->offset != NULL && !ir->offset->type->is_array())
|
2014-07-10 18:18:17 -07:00
|
|
|
num_srcs++;
|
2022-01-06 17:31:01 +08:00
|
|
|
if (ir->clamp != NULL)
|
|
|
|
|
num_srcs++;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
/* Add one for the texture deref */
|
2018-03-19 10:53:45 -07:00
|
|
|
num_srcs += 2;
|
2018-03-15 09:58:22 -07:00
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
nir_tex_instr *instr = nir_tex_instr_create(this->shader, num_srcs);
|
|
|
|
|
|
|
|
|
|
instr->op = op;
|
|
|
|
|
instr->sampler_dim =
|
|
|
|
|
(glsl_sampler_dim) ir->sampler->type->sampler_dimensionality;
|
|
|
|
|
instr->is_array = ir->sampler->type->sampler_array;
|
|
|
|
|
instr->is_shadow = ir->sampler->type->sampler_shadow;
|
2021-12-28 13:52:36 +08:00
|
|
|
|
|
|
|
|
const glsl_type *dest_type
|
|
|
|
|
= ir->is_sparse ? ir->type->field_type("texel") : ir->type;
|
|
|
|
|
assert(dest_type != glsl_type::error_type);
|
2022-04-22 09:05:26 -04:00
|
|
|
if (instr->is_shadow)
|
|
|
|
|
instr->is_new_style_shadow = (dest_type->vector_elements == 1);
|
2021-12-28 13:52:36 +08:00
|
|
|
instr->dest_type = nir_get_nir_type_for_glsl_type(dest_type);
|
|
|
|
|
instr->is_sparse = ir->is_sparse;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
nir_deref_instr *sampler_deref = evaluate_deref(ir->sampler);
|
2019-03-24 17:11:34 +01:00
|
|
|
|
|
|
|
|
/* check for bindless handles */
|
2020-10-30 12:19:25 -05:00
|
|
|
if (!nir_deref_mode_is(sampler_deref, nir_var_uniform) ||
|
2019-03-24 17:11:34 +01:00
|
|
|
nir_deref_instr_get_variable(sampler_deref)->data.bindless) {
|
|
|
|
|
nir_ssa_def *load = nir_load_deref(&b, sampler_deref);
|
|
|
|
|
instr->src[0].src = nir_src_for_ssa(load);
|
|
|
|
|
instr->src[0].src_type = nir_tex_src_texture_handle;
|
|
|
|
|
instr->src[1].src = nir_src_for_ssa(load);
|
|
|
|
|
instr->src[1].src_type = nir_tex_src_sampler_handle;
|
|
|
|
|
} else {
|
|
|
|
|
instr->src[0].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
|
|
|
|
|
instr->src[0].src_type = nir_tex_src_texture_deref;
|
|
|
|
|
instr->src[1].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
|
|
|
|
|
instr->src[1].src_type = nir_tex_src_sampler_deref;
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-19 10:53:45 -07:00
|
|
|
unsigned src_number = 2;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
if (ir->coordinate != NULL) {
|
|
|
|
|
instr->coord_components = ir->coordinate->type->vector_elements;
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->coordinate));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_coord;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ir->projector != NULL) {
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->projector));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_projector;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-12 08:32:38 -05:00
|
|
|
if (ir->shadow_comparator != NULL) {
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
2016-12-12 08:32:38 -05:00
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->shadow_comparator));
|
|
|
|
|
instr->src[src_number].src_type = nir_tex_src_comparator;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ir->offset != NULL) {
|
2019-03-18 21:23:59 +01:00
|
|
|
if (ir->offset->type->is_array()) {
|
|
|
|
|
for (int i = 0; i < ir->offset->type->array_size(); i++) {
|
|
|
|
|
const ir_constant *c =
|
|
|
|
|
ir->offset->as_constant()->get_array_element(i);
|
|
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < 2; ++j) {
|
|
|
|
|
int val = c->get_int_component(j);
|
|
|
|
|
instr->tg4_offsets[i][j] = val;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-03-18 21:23:59 +01:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->offset));
|
|
|
|
|
instr->src[src_number].src_type = nir_tex_src_offset;
|
|
|
|
|
src_number++;
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2022-01-06 17:31:01 +08:00
|
|
|
if (ir->clamp) {
|
|
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->clamp));
|
|
|
|
|
instr->src[src_number].src_type = nir_tex_src_min_lod;
|
|
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
switch (ir->op) {
|
|
|
|
|
case ir_txb:
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->lod_info.bias));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_bias;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txl:
|
|
|
|
|
case ir_txf:
|
|
|
|
|
case ir_txs:
|
|
|
|
|
if (ir->lod_info.lod != NULL) {
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->lod_info.lod));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_lod;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txd:
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdx));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_ddx;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdy));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_ddy;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf_ms:
|
2015-10-30 23:32:50 -04:00
|
|
|
instr->src[src_number].src =
|
|
|
|
|
nir_src_for_ssa(evaluate_rvalue(ir->lod_info.sample_index));
|
2015-01-09 20:01:13 -08:00
|
|
|
instr->src[src_number].src_type = nir_tex_src_ms_index;
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_tg4:
|
|
|
|
|
instr->component = ir->lod_info.component->as_constant()->value.u[0];
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert(src_number == num_srcs);
|
|
|
|
|
|
2021-12-28 13:52:36 +08:00
|
|
|
unsigned bit_size = glsl_get_bit_size(dest_type);
|
2016-04-04 10:16:11 +02:00
|
|
|
add_instr(&instr->instr, nir_tex_instr_dest_size(instr), bit_size);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_constant *ir)
|
|
|
|
|
{
|
|
|
|
|
/*
|
Remove wrongly repeated words in comments
Clean up misrepetitions ('if if', 'the the' etc) found throughout the
comments. This has been done manually, after grepping
case-insensitively for duplicate if, is, the, then, do, for, an,
plus a few other typos corrected in fly-by
v2:
* proper commit message and non-joke title;
* replace two 'as is' followed by 'is' to 'as-is'.
v3:
* 'a integer' => 'an integer' and similar (originally spotted by
Jason Ekstrand, I fixed a few other similar ones while at it)
Signed-off-by: Giuseppe Bilotta <giuseppe.bilotta@gmail.com>
Reviewed-by: Chad Versace <chad.versace@intel.com>
2016-06-23 19:20:18 +02:00
|
|
|
* We don't know if this variable is an array or struct that gets
|
2014-12-19 14:55:45 -08:00
|
|
|
* dereferenced, so do the safe thing an make it a variable with a
|
|
|
|
|
* constant initializer and return a dereference.
|
2014-07-10 18:18:17 -07:00
|
|
|
*/
|
|
|
|
|
|
2015-10-09 07:05:11 -07:00
|
|
|
nir_variable *var =
|
|
|
|
|
nir_local_variable_create(this->impl, ir->type, "const_temp");
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.read_only = true;
|
|
|
|
|
var->constant_initializer = constant_copy(ir, var);
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
this->deref = nir_build_deref_var(&b, var);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
nir_visitor::visit(ir_dereference_variable *ir)
{
   /* References to `out` parameters of the current function are not looked
    * up in the variable table; they map to NIR function parameters instead.
    */
   if (ir->variable_referenced()->data.mode == ir_var_function_out) {
      /* Slot 0 holds the return deref when the signature returns non-void,
       * so declared parameters start at index 1 in that case.
       */
      unsigned i = (sig->return_type != glsl_type::void_type) ? 1 : 0;

      /* Find the position of this variable in the signature's parameter
       * list; presumably this matches the parameter numbering used when
       * the nir_function was created — confirm against the function
       * translation code.
       */
      foreach_in_list(ir_variable, param, &sig->parameters) {
         if (param == ir->variable_referenced()) {
            break;
         }
         i++;
      }

      /* nir_load_param yields an untyped value; cast it back to a deref of
       * the GLSL-declared type.
       */
      this->deref = nir_build_deref_cast(&b, nir_load_param(&b, i),
                                         nir_var_function_temp, ir->type, 0);
      return;
   }

   /* `inout` parameters are expected to have been lowered away earlier. */
   assert(ir->variable_referenced()->data.mode != ir_var_function_inout);

   /* Everything else was registered in var_table when it was declared. */
   struct hash_entry *entry =
      _mesa_hash_table_search(this->var_table, ir->var);
   assert(entry);
   nir_variable *var = (nir_variable *) entry->data;

   this->deref = nir_build_deref_var(&b, var);
}
|
|
|
|
|
|
|
|
|
|
void
nir_visitor::visit(ir_dereference_record *ir)
{
   /* Visit the struct being dereferenced; this leaves its deref in
    * this->deref.
    */
   ir->record->accept(this);

   int field_index = ir->field_idx;
   assert(field_index >= 0);

   /* sparse texture variable is a struct for ir_variable, but it has been
    * converted to a vector for nir_variable.
    */
   if (this->deref->deref_type == nir_deref_type_var &&
       _mesa_set_search(this->sparse_variable_set, this->deref->var)) {
      nir_ssa_def *load = nir_load_deref(&b, this->deref);
      /* At minimum one texel channel plus the residency code channel. */
      assert(load->num_components >= 2);

      nir_ssa_def *ssa;
      const glsl_type *type = ir->record->type;
      if (field_index == type->field_index("code")) {
         /* last channel holds residency code */
         ssa = nir_channel(&b, load, load->num_components - 1);
      } else {
         assert(field_index == type->field_index("texel"));

         /* All channels except the last one form the texel value. */
         unsigned mask = BITFIELD_MASK(load->num_components - 1);
         ssa = nir_channels(&b, load, mask);
      }

      /* still need to create a deref for return */
      nir_variable *tmp =
         nir_local_variable_create(this->impl, ir->type, "deref_tmp");
      this->deref = nir_build_deref_var(&b, tmp);
      nir_store_deref(&b, this->deref, ssa, ~0);
   } else
      this->deref = nir_build_deref_struct(&b, this->deref, field_index);
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_dereference_array *ir)
|
|
|
|
|
{
|
2018-03-15 09:58:22 -07:00
|
|
|
nir_ssa_def *index = evaluate_rvalue(ir->array_index);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
ir->array->accept(this);
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
this->deref = nir_build_deref_array(&b, this->deref, index);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
2014-09-07 19:24:15 +12:00
|
|
|
|
|
|
|
|
void
|
2015-08-25 18:19:12 +01:00
|
|
|
nir_visitor::visit(ir_barrier *)
|
2014-09-07 19:24:15 +12:00
|
|
|
{
|
2023-02-24 02:25:44 -08:00
|
|
|
if (shader->options->use_scoped_barrier) {
|
|
|
|
|
if (shader->info.stage == MESA_SHADER_COMPUTE) {
|
2023-03-01 14:47:56 -05:00
|
|
|
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
|
|
|
|
|
NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
|
2023-02-24 02:25:44 -08:00
|
|
|
} else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
|
2023-03-01 14:47:56 -05:00
|
|
|
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
|
|
|
|
|
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
|
2023-02-24 02:25:44 -08:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
if (shader->info.stage == MESA_SHADER_COMPUTE)
|
|
|
|
|
nir_memory_barrier_shared(&b);
|
|
|
|
|
else if (shader->info.stage == MESA_SHADER_TESS_CTRL)
|
|
|
|
|
nir_memory_barrier_tcs_patch(&b);
|
2020-01-07 14:40:53 -06:00
|
|
|
|
2023-03-01 14:47:56 -05:00
|
|
|
nir_control_barrier(&b);
|
|
|
|
|
}
|
2014-09-07 19:24:15 +12:00
|
|
|
}
|
2019-03-03 10:00:14 -06:00
|
|
|
|
|
|
|
|
nir_shader *
glsl_float64_funcs_to_nir(struct gl_context *ctx,
                          const nir_shader_compiler_options *options)
{
   /* Compile the built-in fp64 software implementation (float64_source)
    * into a pre-optimized NIR shader whose functions can later be inlined
    * into shaders that need double-precision lowering.  Returns NULL if
    * the GLSL compile fails.
    *
    * We pretend it's a vertex shader.  Ultimately, the stage shouldn't
    * matter because we're not optimizing anything here.
    */
   struct gl_shader *sh = _mesa_new_shader(-1, MESA_SHADER_VERTEX);
   sh->Source = float64_source;
   sh->CompileStatus = COMPILE_FAILURE;
   _mesa_glsl_compile_shader(ctx, sh, false, false, true);

   if (!sh->CompileStatus) {
      if (sh->InfoLog) {
         _mesa_problem(ctx,
                       "fp64 software impl compile failed:\n%s\nsource:\n%s\n",
                       sh->InfoLog, float64_source);
      }
      /* Fix: don't leak sh on the failure path.  Clear Source first, since
       * _mesa_delete_shader would otherwise try to free the static const
       * string (mirrors the success path below).
       */
      sh->Source = NULL;
      _mesa_delete_shader(ctx, sh);
      return NULL;
   }

   nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);

   /* Translate the compiled GLSL IR into NIR: the function visitor pass
    * runs first, then the main visitor walks the instruction lists.
    */
   nir_visitor v1(&ctx->Const, nir);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   /* _mesa_delete_shader will try to free sh->Source but it's static const */
   sh->Source = NULL;
   _mesa_delete_shader(ctx, sh);

   nir_validate_shader(nir, "float64_funcs_to_nir");

   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Do some optimizations to clean up the shader now.  By optimizing the
    * functions in the library, we avoid having to re-do that work every
    * time we inline a copy of a function.  Reducing basic blocks also helps
    * with compile times.
    */
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_opt_cse);
   NIR_PASS_V(nir, nir_opt_gcm, true);
   NIR_PASS_V(nir, nir_opt_peephole_select, 1, false, false);
   NIR_PASS_V(nir, nir_opt_dce);

   return nir;
}
|