/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "float64_glsl.h"
#include "glsl_to_nir.h"
#include "ir_visitor.h"
#include "ir_hierarchical_visitor.h"
#include "ir.h"
#include "ir_optimization.h"
#include "program.h"
#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_builtin_builder.h"
#include "compiler/nir/nir_deref.h"
#include "main/errors.h"
#include "main/mtypes.h"
#include "main/shaderobj.h"
#include "util/u_math.h"
#include "util/perf/cpu_trace.h"

/*
 * pass to lower GLSL IR to NIR
 *
 * This will lower variable dereferences to loads/stores of corresponding
 * variables in NIR - the variables will be converted to registers in a later
 * pass.
 */

namespace {

class nir_visitor : public ir_visitor
{
public:
   nir_visitor(const struct gl_constants *consts, nir_shader *shader);
   ~nir_visitor();

   virtual void visit(ir_variable *);
   virtual void visit(ir_function *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_if *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_demote *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_return *);
   virtual void visit(ir_call *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_barrier *);

   void create_function(ir_function_signature *ir);

private:
   void add_instr(nir_instr *instr, unsigned num_components, unsigned bit_size);
   void truncate_after_instruction(exec_node *ir);
   nir_def *evaluate_rvalue(ir_rvalue *ir);

   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def **srcs);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1,
                       nir_def *src2);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1,
                       nir_def *src2, nir_def *src3);

   bool supports_std430;

   nir_shader *shader;
   nir_function_impl *impl;
   nir_builder b;
   nir_def *result; /* result of the expression tree last visited */

   nir_deref_instr *evaluate_deref(ir_instruction *ir);

   nir_constant *constant_copy(ir_constant *ir, void *mem_ctx);

   /* most recent deref instruction created */
   nir_deref_instr *deref;

   /* whether the IR we're operating on is per-function or global */
   bool is_global;

   ir_function_signature *sig;

   /* map of ir_variable -> nir_variable */
   struct hash_table *var_table;

   /* map of ir_function_signature -> nir_function_overload */
   struct hash_table *overload_table;

   /* set of nir_variables that hold sparse results */
   struct set *sparse_variable_set;

   void adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
                               nir_def *dest);

   const struct gl_constants *consts;
};

/*
 * This visitor runs before the main visitor, calling create_function() for
 * each function so that the main visitor can resolve forward references in
 * calls.
 */

class nir_function_visitor : public ir_hierarchical_visitor
{
public:
   nir_function_visitor(nir_visitor *v) : visitor(v)
   {
   }
   virtual ir_visitor_status visit_enter(ir_function *);

private:
   nir_visitor *visitor;
};

} /* end of anonymous namespace */
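
/* Entry point of the pass: create a fresh nir_shader for the given stage,
 * walk the linked GLSL IR with the two visitors above, and then free the
 * GLSL IR, which is no longer needed afterwards.
 */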
nir_shader *
glsl_to_nir(const struct gl_constants *consts,
            struct exec_list **ir, shader_info *si, gl_shader_stage stage,
            const nir_shader_compiler_options *options)
{
   MESA_TRACE_FUNC();

   nir_shader *shader = nir_shader_create(NULL, stage, options, si);

   nir_visitor v1(consts, shader);
   nir_function_visitor v2(&v1);
   v2.run(*ir);
   visit_exec_list(*ir, &v1);

   /* The GLSL IR won't be needed anymore. */
   ralloc_free(*ir);
   *ir = NULL;

   nir_validate_shader(shader, "after glsl to nir, before function inline");
   if (should_print_nir(shader)) {
      printf("glsl_to_nir\n");
      nir_print_shader(shader, stdout);
   }

   shader->info.subgroup_size = SUBGROUP_SIZE_UNIFORM;

   return shader;
}

nir_visitor::nir_visitor(const struct gl_constants *consts, nir_shader *shader)
{
   this->consts = consts;
   this->supports_std430 = consts->UseSTD430AsDefaultPacking;
   this->shader = shader;
   this->is_global = true;
   this->var_table = _mesa_pointer_hash_table_create(NULL);
   this->overload_table = _mesa_pointer_hash_table_create(NULL);
   this->sparse_variable_set = _mesa_pointer_set_create(NULL);
   this->result = NULL;
   this->impl = NULL;
   this->deref = NULL;
   this->sig = NULL;
   memset(&this->b, 0, sizeof(this->b));
}

nir_visitor::~nir_visitor()
{
   _mesa_hash_table_destroy(this->var_table, NULL);
   _mesa_hash_table_destroy(this->overload_table, NULL);
   _mesa_set_destroy(this->sparse_variable_set, NULL);
}

nir_deref_instr *
nir_visitor::evaluate_deref(ir_instruction *ir)
{
   ir->accept(this);
   return this->deref;
}
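
/* Remove every IR instruction that follows "ir" in its exec_list.  This is
 * used right after emitting a NIR jump, where anything left after the jump
 * would be unreachable and NIR does not allow instructions there.
 */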
void
nir_visitor::truncate_after_instruction(exec_node *ir)
{
   if (!ir)
      return;

   while (!ir->get_next()->is_tail_sentinel()) {
      ((ir_instruction *)ir->get_next())->remove();
   }
}

nir_constant *
nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
{
   if (ir == NULL)
      return NULL;

   nir_constant *ret = rzalloc(mem_ctx, nir_constant);

   const unsigned rows = ir->type->vector_elements;
   const unsigned cols = ir->type->matrix_columns;
   unsigned i;

   ret->num_elements = 0;
   switch (ir->type->base_type) {
   case GLSL_TYPE_UINT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u32 = ir->value.u[r];

      break;

   case GLSL_TYPE_UINT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u16 = ir->value.u16[r];
      break;

   case GLSL_TYPE_INT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i32 = ir->value.i[r];

      break;

   case GLSL_TYPE_INT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i16 = ir->value.i16[r];
      break;

   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      if (cols > 1) {
         ret->elements = ralloc_array(mem_ctx, nir_constant *, cols);
         ret->num_elements = cols;
         for (unsigned c = 0; c < cols; c++) {
            nir_constant *col_const = rzalloc(mem_ctx, nir_constant);
            col_const->num_elements = 0;
            switch (ir->type->base_type) {
            case GLSL_TYPE_FLOAT:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f32 = ir->value.f[c * rows + r];
               break;

            case GLSL_TYPE_FLOAT16:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].u16 = ir->value.f16[c * rows + r];
               break;

            case GLSL_TYPE_DOUBLE:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f64 = ir->value.d[c * rows + r];
               break;

            default:
               unreachable("Cannot get here from the first level switch");
            }
            ret->elements[c] = col_const;
         }
      } else {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f32 = ir->value.f[r];
            break;

         case GLSL_TYPE_FLOAT16:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].u16 = ir->value.f16[r];
            break;

         case GLSL_TYPE_DOUBLE:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f64 = ir->value.d[r];
            break;

         default:
            unreachable("Cannot get here from the first level switch");
         }
      }
      break;

   case GLSL_TYPE_UINT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u64 = ir->value.u64[r];
      break;

   case GLSL_TYPE_INT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i64 = ir->value.i64[r];
      break;

   case GLSL_TYPE_BOOL:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].b = ir->value.b[r];

      break;

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_ARRAY:
      ret->elements = ralloc_array(mem_ctx, nir_constant *,
                                   ir->type->length);
      ret->num_elements = ir->type->length;

      for (i = 0; i < ir->type->length; i++)
         ret->elements[i] = constant_copy(ir->const_elements[i], mem_ctx);
      break;

   default:
      unreachable("not reached");
   }

   return ret;
}

void
nir_visitor::adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
                                    nir_def *dest)
{
   const glsl_type *texel_type = glsl_get_field_type(type, "texel");
   assert(texel_type);

   assert(var_deref->deref_type == nir_deref_type_var);
   nir_variable *var = var_deref->var;

   /* Adjust the nir_variable type to match the sparse NIR instructions:
    * the nir_variable is created with the struct type from the ir_variable,
    * but sparse NIR instructions write a vector dest.
    */
   var->type = glsl_simple_type(glsl_get_base_glsl_type(texel_type)->base_type,
                                dest->num_components, 1);

   var_deref->type = var->type;

   /* Record the adjusted variable. */
   _mesa_set_add(this->sparse_variable_set, var);
}

static unsigned
get_nir_how_declared(unsigned how_declared)
{
   if (how_declared == ir_var_hidden)
      return nir_var_hidden;

   if (how_declared == ir_var_declared_implicitly)
      return nir_var_declared_implicitly;

   return nir_var_declared_normally;
}
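
/* Translate one ir_variable into a nir_variable: copy its type, name, mode
 * and the various layout/qualifier bits, then register the mapping in
 * var_table so later dereferences can find it.
 */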
void
nir_visitor::visit(ir_variable *ir)
{
   /* FINISHME: inout parameters */
   assert(ir->data.mode != ir_var_function_inout);

   if (ir->data.mode == ir_var_function_out)
      return;

   nir_variable *var = rzalloc(shader, nir_variable);
   var->type = ir->type;
   var->name = ralloc_strdup(var, ir->name);

   var->data.assigned = ir->data.assigned;
   var->data.read_only = ir->data.read_only;
   var->data.centroid = ir->data.centroid;
   var->data.sample = ir->data.sample;
   var->data.patch = ir->data.patch;
   var->data.how_declared = get_nir_how_declared(ir->data.how_declared);
   var->data.invariant = ir->data.invariant;
   var->data.explicit_invariant = ir->data.explicit_invariant;
   var->data.location = ir->data.location;
   var->data.must_be_shader_input = ir->data.must_be_shader_input;
   var->data.stream = ir->data.stream;
   if (ir->data.stream & (1u << 31))
      var->data.stream |= NIR_STREAM_PACKED;

   var->data.precision = ir->data.precision;
   var->data.explicit_location = ir->data.explicit_location;
   var->data.matrix_layout = ir->data.matrix_layout;
   var->data.from_named_ifc_block = ir->data.from_named_ifc_block;
   var->data.compact = false;
   var->data.used = ir->data.used;
   var->data.max_array_access = ir->data.max_array_access;
   var->data.implicit_sized_array = ir->data.implicit_sized_array;
   var->data.from_ssbo_unsized_array = ir->data.from_ssbo_unsized_array;

   switch(ir->data.mode) {
   case ir_var_auto:
   case ir_var_temporary:
      if (is_global)
         var->data.mode = nir_var_shader_temp;
      else
         var->data.mode = nir_var_function_temp;
      break;

   case ir_var_function_in:
   case ir_var_const_in:
      var->data.mode = nir_var_function_temp;
      break;

   case ir_var_shader_in:
      if (shader->info.stage == MESA_SHADER_GEOMETRY &&
          ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
         /* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
         var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
         var->data.mode = nir_var_system_value;
      } else {
         var->data.mode = nir_var_shader_in;
      }
      break;

   case ir_var_shader_out:
      var->data.mode = nir_var_shader_out;
      break;

   case ir_var_uniform:
      if (ir->get_interface_type())
         var->data.mode = nir_var_mem_ubo;
      else if (glsl_type_contains_image(ir->type) && !ir->data.bindless)
         var->data.mode = nir_var_image;
      else
         var->data.mode = nir_var_uniform;
      break;

   case ir_var_shader_storage:
      var->data.mode = nir_var_mem_ssbo;
      break;

   case ir_var_system_value:
      var->data.mode = nir_var_system_value;
      break;

   case ir_var_shader_shared:
      var->data.mode = nir_var_mem_shared;
      break;

   default:
      unreachable("not reached");
   }

   unsigned mem_access = 0;
   if (ir->data.memory_read_only)
      mem_access |= ACCESS_NON_WRITEABLE;
   if (ir->data.memory_write_only)
      mem_access |= ACCESS_NON_READABLE;
   if (ir->data.memory_coherent)
      mem_access |= ACCESS_COHERENT;
   if (ir->data.memory_volatile)
      mem_access |= ACCESS_VOLATILE;
   if (ir->data.memory_restrict)
      mem_access |= ACCESS_RESTRICT;

   var->interface_type = ir->get_interface_type();

   if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
      if (!glsl_type_is_interface(glsl_without_array(ir->type))) {
         /* This variable is one entry in the interface */
         UNUSED bool found = false;
         for (unsigned i = 0; i < ir->get_interface_type()->length; i++) {
            const glsl_struct_field *field =
               &ir->get_interface_type()->fields.structure[i];
            if (strcmp(ir->name, field->name) != 0)
               continue;

            if (field->memory_read_only)
               mem_access |= ACCESS_NON_WRITEABLE;
            if (field->memory_write_only)
               mem_access |= ACCESS_NON_READABLE;
            if (field->memory_coherent)
               mem_access |= ACCESS_COHERENT;
            if (field->memory_volatile)
               mem_access |= ACCESS_VOLATILE;
            if (field->memory_restrict)
               mem_access |= ACCESS_RESTRICT;

            found = true;
            break;
         }
         assert(found);
      }
   }

   var->data.interpolation = ir->data.interpolation;
   var->data.location_frac = ir->data.location_frac;

   switch (ir->data.depth_layout) {
   case ir_depth_layout_none:
      var->data.depth_layout = nir_depth_layout_none;
      break;
   case ir_depth_layout_any:
      var->data.depth_layout = nir_depth_layout_any;
      break;
   case ir_depth_layout_greater:
      var->data.depth_layout = nir_depth_layout_greater;
      break;
   case ir_depth_layout_less:
      var->data.depth_layout = nir_depth_layout_less;
      break;
   case ir_depth_layout_unchanged:
      var->data.depth_layout = nir_depth_layout_unchanged;
      break;
   default:
      unreachable("not reached");
   }

   var->data.index = ir->data.index;
   var->data.descriptor_set = 0;
   var->data.binding = ir->data.binding;
   var->data.explicit_binding = ir->data.explicit_binding;
   var->data.explicit_offset = ir->data.explicit_xfb_offset;
   var->data.bindless = ir->data.bindless;
   var->data.offset = ir->data.offset;
   var->data.access = (gl_access_qualifier)mem_access;
   var->data.has_initializer = ir->data.has_initializer;
   var->data.is_implicit_initializer = ir->data.is_implicit_initializer;

   if (glsl_type_is_image(glsl_without_array(var->type))) {
      var->data.image.format = ir->data.image_format;
   } else if (var->data.mode == nir_var_shader_out) {
      var->data.xfb.buffer = ir->data.xfb_buffer;
      var->data.xfb.stride = ir->data.xfb_stride;
   }

   var->data.fb_fetch_output = ir->data.fb_fetch_output;
   var->data.explicit_xfb_buffer = ir->data.explicit_xfb_buffer;
   var->data.explicit_xfb_stride = ir->data.explicit_xfb_stride;

   var->num_state_slots = ir->get_num_state_slots();
   if (var->num_state_slots > 0) {
      var->state_slots = rzalloc_array(var, nir_state_slot,
                                       var->num_state_slots);

      ir_state_slot *state_slots = ir->get_state_slots();
      for (unsigned i = 0; i < var->num_state_slots; i++) {
         for (unsigned j = 0; j < 4; j++)
            var->state_slots[i].tokens[j] = state_slots[i].tokens[j];
      }
   } else {
      var->state_slots = NULL;
   }

   /* Values declared const will have ir->constant_value instead of
    * ir->constant_initializer.
    */
   if (ir->constant_initializer)
      var->constant_initializer = constant_copy(ir->constant_initializer, var);
   else
      var->constant_initializer = constant_copy(ir->constant_value, var);

   if (var->data.mode == nir_var_function_temp)
      nir_function_impl_add_variable(impl, var);
   else
      nir_shader_add_variable(shader, var);

   _mesa_hash_table_insert(var_table, ir, var);
}

ir_visitor_status
nir_function_visitor::visit_enter(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures) {
      visitor->create_function(sig);
   }
   return visit_continue_with_parent;
}

void
nir_visitor::create_function(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   nir_function *func = nir_function_create(shader, ir->function_name());
   if (strcmp(ir->function_name(), "main") == 0)
      func->is_entrypoint = true;

   func->num_params = ir->parameters.length() +
                      (ir->return_type != &glsl_type_builtin_void);
   func->params = ralloc_array(shader, nir_parameter, func->num_params);

   unsigned np = 0;

   if (ir->return_type != &glsl_type_builtin_void) {
      /* The return value is a variable deref (basically an out parameter) */
      func->params[np].num_components = 1;
      func->params[np].bit_size = 32;
      func->params[np].type = ir->return_type;
      func->params[np].is_return = true;
      np++;
   }

   foreach_in_list(ir_variable, param, &ir->parameters) {
      func->params[np].num_components = 1;
      func->params[np].bit_size = 32;

      func->params[np].type = param->type;
      func->params[np].is_return = false;
      np++;
   }
   assert(np == func->num_params);

   func->is_subroutine = ir->function()->is_subroutine;
   func->num_subroutine_types = ir->function()->num_subroutine_types;
   func->subroutine_index = ir->function()->subroutine_index;
   func->subroutine_types =
      ralloc_array(func, const struct glsl_type *, func->num_subroutine_types);
   for (int i = 0; i < func->num_subroutine_types; i++)
      func->subroutine_types[i] = ir->function()->subroutine_types[i];

   _mesa_hash_table_insert(this->overload_table, ir, func);
}

void
nir_visitor::visit(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures)
      sig->accept(this);
}

void
nir_visitor::visit(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   this->sig = ir;

   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir);

   assert(entry);
   nir_function *func = (nir_function *) entry->data;

   if (ir->is_defined) {
      nir_function_impl *impl = nir_function_impl_create(func);
      this->impl = impl;

      this->is_global = false;

      b = nir_builder_at(nir_after_impl(impl));

      visit_exec_list(&ir->body, this);

      this->is_global = true;
   } else {
      func->impl = NULL;
   }
}

void
nir_visitor::visit(ir_loop *ir)
{
   nir_push_loop(&b);
   visit_exec_list(&ir->body_instructions, this);
   nir_pop_loop(&b, NULL);
}

void
nir_visitor::visit(ir_if *ir)
{
   nir_push_if(&b, evaluate_rvalue(ir->condition));
   visit_exec_list(&ir->then_instructions, this);
   nir_push_else(&b, NULL);
   visit_exec_list(&ir->else_instructions, this);
   nir_pop_if(&b, NULL);
}

void
nir_visitor::visit(ir_discard *ir)
{
   /*
    * discards aren't treated as control flow, because before we lower them
    * they can appear anywhere in the shader and the stuff after them may still
    * be executed (yay, crazy GLSL rules!). However, after lowering, all the
    * discards will be immediately followed by a return.
    */

   if (ir->condition)
      nir_discard_if(&b, evaluate_rvalue(ir->condition));
   else
      nir_discard(&b);
}

void
nir_visitor::visit(ir_demote *ir)
{
   nir_demote(&b);
}

void
nir_visitor::visit(ir_emit_vertex *ir)
{
   nir_emit_vertex(&b, (unsigned)ir->stream_id());
}

void
nir_visitor::visit(ir_end_primitive *ir)
{
   nir_end_primitive(&b, (unsigned)ir->stream_id());
}

void
nir_visitor::visit(ir_loop_jump *ir)
{
   nir_jump_type type;
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      type = nir_jump_break;
      break;
   case ir_loop_jump::jump_continue:
      type = nir_jump_continue;
      break;
   default:
      unreachable("not reached");
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, type);
   nir_builder_instr_insert(&b, &instr->instr);

   /* Eliminate all instructions after the jump, since they are unreachable
    * and NIR considers adding these instructions illegal.
    */
   truncate_after_instruction(ir);
}

void
nir_visitor::visit(ir_return *ir)
{
   if (ir->value != NULL) {
      nir_deref_instr *ret_deref =
         nir_build_deref_cast(&b, nir_load_param(&b, 0),
                              nir_var_function_temp, ir->value->type, 0);

      if (glsl_type_is_vector_or_scalar(ir->value->type)) {
         nir_store_deref(&b, ret_deref, evaluate_rvalue(ir->value), ~0);
      } else {
         nir_copy_deref(&b, ret_deref, evaluate_deref(ir->value));
      }
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, nir_jump_return);
   nir_builder_instr_insert(&b, &instr->instr);

   /* Eliminate all instructions after the jump, since they are unreachable
    * and NIR considers adding these instructions illegal.
    */
   truncate_after_instruction(ir);
}
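
/* std430 aligns a vector to its component size times the component count
 * rounded up to a power of two (so a vec3 gets vec4 alignment); booleans are
 * handled as 32-bit values here.
 */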
static void
intrinsic_set_std430_align(nir_intrinsic_instr *intrin, const glsl_type *type)
{
   unsigned bit_size = glsl_type_is_boolean(type) ? 32 : glsl_get_bit_size(type);
   unsigned pow2_components = util_next_power_of_two(type->vector_elements);
   nir_intrinsic_set_align(intrin, (bit_size / 8) * pow2_components, 0);
}

/* Accumulate any qualifiers along the deref chain to get the actual
 * load/store qualifier.
 */

static enum gl_access_qualifier
deref_get_qualifier(nir_deref_instr *deref)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   /* Function params can lead to a deref cast; just return zero, as these
    * params have no qualifiers anyway.
    */
   if (path.path[0]->deref_type != nir_deref_type_var)
      return (gl_access_qualifier) 0;

   unsigned qualifiers = path.path[0]->var->data.access;

   const glsl_type *parent_type = path.path[0]->type;
   for (nir_deref_instr **cur_ptr = &path.path[1]; *cur_ptr; cur_ptr++) {
      nir_deref_instr *cur = *cur_ptr;

      if (glsl_type_is_interface(parent_type)) {
         const struct glsl_struct_field *field =
            &parent_type->fields.structure[cur->strct.index];
         if (field->memory_read_only)
            qualifiers |= ACCESS_NON_WRITEABLE;
         if (field->memory_write_only)
            qualifiers |= ACCESS_NON_READABLE;
         if (field->memory_coherent)
            qualifiers |= ACCESS_COHERENT;
         if (field->memory_volatile)
            qualifiers |= ACCESS_VOLATILE;
         if (field->memory_restrict)
            qualifiers |= ACCESS_RESTRICT;
      }

      parent_type = cur->type;
   }

   nir_deref_path_finish(&path);

   return (gl_access_qualifier) qualifiers;
}
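
/* Map a GLSL IR reduce/scan intrinsic id to the NIR ALU op that does the
 * combining, picking the signed/unsigned/float variant from the operand
 * type.
 */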
static nir_op
get_reduction_op(enum ir_intrinsic_id id, const glsl_type *type)
{
#define IR_CASE(op) \
   case ir_intrinsic_reduce_##op: \
   case ir_intrinsic_inclusive_##op: \
   case ir_intrinsic_exclusive_##op: \
      return CONV_OP(op);

   switch (id) {

#define CONV_OP(op) \
   type->base_type == GLSL_TYPE_INT || type->base_type == GLSL_TYPE_UINT ? \
   nir_op_i##op : nir_op_f##op

   IR_CASE(add)
   IR_CASE(mul)

#undef CONV_OP
#define CONV_OP(op) \
   type->base_type == GLSL_TYPE_INT ? nir_op_i##op : \
   (type->base_type == GLSL_TYPE_UINT ? nir_op_u##op : nir_op_f##op)

   IR_CASE(min)
   IR_CASE(max)

#undef CONV_OP
#define CONV_OP(op) nir_op_i##op

   IR_CASE(and)
   IR_CASE(or)
   IR_CASE(xor)

#undef CONV_OP

   default:
      unreachable("not reached");
   }

#undef IR_CASE
}

void
nir_visitor::visit(ir_call *ir)
{
   if (ir->callee->is_intrinsic()) {
      nir_intrinsic_op op;

      /* Initialize to something because gcc complains otherwise */
      nir_atomic_op atomic_op = nir_atomic_op_iadd;

      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_generic_atomic_add:
         op = nir_intrinsic_deref_atomic;
         atomic_op = glsl_type_is_integer_32_64(ir->return_deref->type)
                     ? nir_atomic_op_iadd : nir_atomic_op_fadd;
         break;
      case ir_intrinsic_generic_atomic_and:
         op = nir_intrinsic_deref_atomic;
         atomic_op = nir_atomic_op_iand;
         break;
      case ir_intrinsic_generic_atomic_or:
         op = nir_intrinsic_deref_atomic;
         atomic_op = nir_atomic_op_ior;
         break;
      case ir_intrinsic_generic_atomic_xor:
         op = nir_intrinsic_deref_atomic;
         atomic_op = nir_atomic_op_ixor;
         break;
      case ir_intrinsic_generic_atomic_min:
         assert(ir->return_deref);
         op = nir_intrinsic_deref_atomic;
         if (ir->return_deref->type == &glsl_type_builtin_int ||
             ir->return_deref->type == &glsl_type_builtin_int64_t)
            atomic_op = nir_atomic_op_imin;
         else if (ir->return_deref->type == &glsl_type_builtin_uint ||
                  ir->return_deref->type == &glsl_type_builtin_uint64_t)
            atomic_op = nir_atomic_op_umin;
         else if (ir->return_deref->type == &glsl_type_builtin_float)
            atomic_op = nir_atomic_op_fmin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_max:
         assert(ir->return_deref);
         op = nir_intrinsic_deref_atomic;
         if (ir->return_deref->type == &glsl_type_builtin_int ||
             ir->return_deref->type == &glsl_type_builtin_int64_t)
            atomic_op = nir_atomic_op_imax;
         else if (ir->return_deref->type == &glsl_type_builtin_uint ||
                  ir->return_deref->type == &glsl_type_builtin_uint64_t)
            atomic_op = nir_atomic_op_umax;
         else if (ir->return_deref->type == &glsl_type_builtin_float)
            atomic_op = nir_atomic_op_fmax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_exchange:
         op = nir_intrinsic_deref_atomic;
         atomic_op = nir_atomic_op_xchg;
         break;
      case ir_intrinsic_generic_atomic_comp_swap:
         op = nir_intrinsic_deref_atomic_swap;
         atomic_op = glsl_type_is_integer_32_64(ir->return_deref->type)
                     ? nir_atomic_op_cmpxchg
                     : nir_atomic_op_fcmpxchg;
         break;
      case ir_intrinsic_atomic_counter_read:
         op = nir_intrinsic_atomic_counter_read_deref;
         break;
      case ir_intrinsic_atomic_counter_increment:
         op = nir_intrinsic_atomic_counter_inc_deref;
         break;
      case ir_intrinsic_atomic_counter_predecrement:
         op = nir_intrinsic_atomic_counter_pre_dec_deref;
         break;
      case ir_intrinsic_atomic_counter_add:
         op = nir_intrinsic_atomic_counter_add_deref;
         break;
      case ir_intrinsic_atomic_counter_and:
         op = nir_intrinsic_atomic_counter_and_deref;
         break;
      case ir_intrinsic_atomic_counter_or:
         op = nir_intrinsic_atomic_counter_or_deref;
         break;
      case ir_intrinsic_atomic_counter_xor:
         op = nir_intrinsic_atomic_counter_xor_deref;
         break;
      case ir_intrinsic_atomic_counter_min:
         op = nir_intrinsic_atomic_counter_min_deref;
         break;
      case ir_intrinsic_atomic_counter_max:
         op = nir_intrinsic_atomic_counter_max_deref;
         break;
      case ir_intrinsic_atomic_counter_exchange:
         op = nir_intrinsic_atomic_counter_exchange_deref;
         break;
      case ir_intrinsic_atomic_counter_comp_swap:
         op = nir_intrinsic_atomic_counter_comp_swap_deref;
         break;
      case ir_intrinsic_image_load:
         op = nir_intrinsic_image_deref_load;
         break;
      case ir_intrinsic_image_store:
         op = nir_intrinsic_image_deref_store;
         break;
      case ir_intrinsic_image_atomic_add:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = glsl_type_is_integer_32_64(ir->return_deref->type)
                     ? nir_atomic_op_iadd
                     : nir_atomic_op_fadd;
         break;
      case ir_intrinsic_image_atomic_min:
         op = nir_intrinsic_image_deref_atomic;
         if (ir->return_deref->type == &glsl_type_builtin_int)
            atomic_op = nir_atomic_op_imin;
         else if (ir->return_deref->type == &glsl_type_builtin_uint)
            atomic_op = nir_atomic_op_umin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_max:
         op = nir_intrinsic_image_deref_atomic;
         if (ir->return_deref->type == &glsl_type_builtin_int)
            atomic_op = nir_atomic_op_imax;
         else if (ir->return_deref->type == &glsl_type_builtin_uint)
            atomic_op = nir_atomic_op_umax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_and:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_iand;
         break;
      case ir_intrinsic_image_atomic_or:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_ior;
         break;
      case ir_intrinsic_image_atomic_xor:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_ixor;
         break;
      case ir_intrinsic_image_atomic_exchange:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_xchg;
         break;
      case ir_intrinsic_image_atomic_comp_swap:
         op = nir_intrinsic_image_deref_atomic_swap;
         atomic_op = nir_atomic_op_cmpxchg;
         break;
      case ir_intrinsic_image_atomic_inc_wrap:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_inc_wrap;
         break;
      case ir_intrinsic_image_atomic_dec_wrap:
         op = nir_intrinsic_image_deref_atomic;
         atomic_op = nir_atomic_op_dec_wrap;
         break;
      case ir_intrinsic_memory_barrier:
      case ir_intrinsic_memory_barrier_buffer:
      case ir_intrinsic_memory_barrier_image:
      case ir_intrinsic_memory_barrier_shared:
      case ir_intrinsic_memory_barrier_atomic_counter:
      case ir_intrinsic_group_memory_barrier:
      case ir_intrinsic_subgroup_barrier:
      case ir_intrinsic_subgroup_memory_barrier:
      case ir_intrinsic_subgroup_memory_barrier_buffer:
      case ir_intrinsic_subgroup_memory_barrier_shared:
      case ir_intrinsic_subgroup_memory_barrier_image:
         op = nir_intrinsic_barrier;
         break;
      case ir_intrinsic_image_size:
         op = nir_intrinsic_image_deref_size;
         break;
      case ir_intrinsic_image_samples:
         op = nir_intrinsic_image_deref_samples;
         break;
      case ir_intrinsic_image_sparse_load:
         op = nir_intrinsic_image_deref_sparse_load;
         break;
      case ir_intrinsic_shader_clock:
         op = nir_intrinsic_shader_clock;
         break;
      case ir_intrinsic_begin_invocation_interlock:
         op = nir_intrinsic_begin_invocation_interlock;
         break;
      case ir_intrinsic_end_invocation_interlock:
         op = nir_intrinsic_end_invocation_interlock;
         break;
      case ir_intrinsic_vote_any:
         op = nir_intrinsic_vote_any;
         break;
      case ir_intrinsic_vote_all:
         op = nir_intrinsic_vote_all;
         break;
      case ir_intrinsic_vote_eq: {
         ir_rvalue *rvalue = (ir_rvalue *) ir->actual_parameters.get_head();
         op = glsl_type_is_integer(rvalue->type) ? nir_intrinsic_vote_ieq : nir_intrinsic_vote_feq;
         break;
      }
      case ir_intrinsic_ballot:
         op = nir_intrinsic_ballot;
         break;
      case ir_intrinsic_read_invocation:
         op = nir_intrinsic_read_invocation;
         break;
      case ir_intrinsic_read_first_invocation:
         op = nir_intrinsic_read_first_invocation;
         break;
      case ir_intrinsic_helper_invocation:
         op = nir_intrinsic_is_helper_invocation;
         break;
      case ir_intrinsic_is_sparse_texels_resident:
         op = nir_intrinsic_is_sparse_texels_resident;
         break;
      case ir_intrinsic_elect:
         op = nir_intrinsic_elect;
         break;
      case ir_intrinsic_inverse_ballot:
         op = nir_intrinsic_inverse_ballot;
         break;
      case ir_intrinsic_ballot_bit_extract:
         op = nir_intrinsic_ballot_bitfield_extract;
         break;
      case ir_intrinsic_ballot_bit_count:
         op = nir_intrinsic_ballot_bit_count_reduce;
         break;
      case ir_intrinsic_ballot_inclusive_bit_count:
         op = nir_intrinsic_ballot_bit_count_inclusive;
         break;
      case ir_intrinsic_ballot_exclusive_bit_count:
         op = nir_intrinsic_ballot_bit_count_exclusive;
         break;
      case ir_intrinsic_ballot_find_lsb:
         op = nir_intrinsic_ballot_find_lsb;
         break;
      case ir_intrinsic_ballot_find_msb:
         op = nir_intrinsic_ballot_find_msb;
         break;
      case ir_intrinsic_shuffle:
         op = nir_intrinsic_shuffle;
         break;
      case ir_intrinsic_shuffle_xor:
         op = nir_intrinsic_shuffle_xor;
         break;
      case ir_intrinsic_shuffle_up:
         op = nir_intrinsic_shuffle_up;
         break;
      case ir_intrinsic_shuffle_down:
         op = nir_intrinsic_shuffle_down;
         break;
      case ir_intrinsic_reduce_add:
      case ir_intrinsic_reduce_mul:
      case ir_intrinsic_reduce_min:
      case ir_intrinsic_reduce_max:
      case ir_intrinsic_reduce_and:
      case ir_intrinsic_reduce_or:
      case ir_intrinsic_reduce_xor:
         op = nir_intrinsic_reduce;
         break;
      case ir_intrinsic_inclusive_add:
      case ir_intrinsic_inclusive_mul:
      case ir_intrinsic_inclusive_min:
      case ir_intrinsic_inclusive_max:
      case ir_intrinsic_inclusive_and:
      case ir_intrinsic_inclusive_or:
      case ir_intrinsic_inclusive_xor:
         op = nir_intrinsic_inclusive_scan;
         break;
      case ir_intrinsic_exclusive_add:
      case ir_intrinsic_exclusive_mul:
      case ir_intrinsic_exclusive_min:
      case ir_intrinsic_exclusive_max:
      case ir_intrinsic_exclusive_and:
      case ir_intrinsic_exclusive_or:
      case ir_intrinsic_exclusive_xor:
         op = nir_intrinsic_exclusive_scan;
         break;
      default:
         unreachable("not reached");
      }
|
|
|
|
|
|
|
|
|
|
      nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
      nir_def *ret = &instr->def;

      switch (op) {
      case nir_intrinsic_deref_atomic:
      case nir_intrinsic_deref_atomic_swap: {
         int param_count = ir->actual_parameters.length();
         assert(param_count == 2 || param_count == 3);

         /* Deref */
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *rvalue = (ir_rvalue *) param;
         ir_dereference *deref = rvalue->as_dereference();
         ir_swizzle *swizzle = NULL;
         if (!deref) {
            /* We may have a swizzle to pick off a single vec4 component */
            swizzle = rvalue->as_swizzle();
            assert(swizzle && swizzle->type->vector_elements == 1);
            deref = swizzle->val->as_dereference();
            assert(deref);
         }
         nir_deref_instr *nir_deref = evaluate_deref(deref);
         if (swizzle) {
            nir_deref = nir_build_deref_array_imm(&b, nir_deref,
                                                  swizzle->mask.x);
         }
         instr->src[0] = nir_src_for_ssa(&nir_deref->def);

         nir_intrinsic_set_atomic_op(instr, atomic_op);
         nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));

         /* data1 parameter (this is always present) */
         param = param->get_next();
         ir_instruction *inst = (ir_instruction *) param;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data2 parameter (only with atomic_comp_swap) */
         if (param_count == 3) {
            assert(op == nir_intrinsic_deref_atomic_swap);
            param = param->get_next();
            inst = (ir_instruction *) param;
            instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
         }

         /* Atomic result */
         assert(ir->return_deref);
         if (glsl_type_is_integer_64(ir->return_deref->type)) {
            nir_def_init(&instr->instr, &instr->def,
                         ir->return_deref->type->vector_elements, 64);
         } else {
            nir_def_init(&instr->instr, &instr->def,
                         ir->return_deref->type->vector_elements, 32);
         }
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
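
      /* Atomic counter built-ins: src[0] is always the counter deref, and any
       * remaining parameters become the data sources.
       */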
      case nir_intrinsic_atomic_counter_read_deref:
      case nir_intrinsic_atomic_counter_inc_deref:
      case nir_intrinsic_atomic_counter_pre_dec_deref:
      case nir_intrinsic_atomic_counter_add_deref:
      case nir_intrinsic_atomic_counter_min_deref:
      case nir_intrinsic_atomic_counter_max_deref:
      case nir_intrinsic_atomic_counter_and_deref:
      case nir_intrinsic_atomic_counter_or_deref:
      case nir_intrinsic_atomic_counter_xor_deref:
      case nir_intrinsic_atomic_counter_exchange_deref:
      case nir_intrinsic_atomic_counter_comp_swap_deref: {
         /* Set the counter variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *counter = (ir_dereference *)param;

         instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->def);
         param = param->get_next();

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            nir_def_init(&instr->instr, &instr->def, 1, 32);
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[1] =
               nir_src_for_ssa(evaluate_rvalue((ir_rvalue *)param));
            param = param->get_next();
         }

         if (!param->is_tail_sentinel()) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
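
      /* Image built-ins share one operand layout: src[0] is the image deref,
       * src[1] the (padded) coordinate, src[2] the sample index, and the
       * remaining sources carry the data and/or LOD.
       */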
      case nir_intrinsic_image_deref_load:
      case nir_intrinsic_image_deref_store:
      case nir_intrinsic_image_deref_atomic:
      case nir_intrinsic_image_deref_atomic_swap:
      case nir_intrinsic_image_deref_samples:
      case nir_intrinsic_image_deref_size:
      case nir_intrinsic_image_deref_sparse_load: {
         /* Set the image variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *image = (ir_dereference *)param;
         nir_deref_instr *deref = evaluate_deref(image);
         const glsl_type *type = deref->type;

         nir_intrinsic_set_access(instr, deref_get_qualifier(deref));

         if (op == nir_intrinsic_image_deref_atomic ||
             op == nir_intrinsic_image_deref_atomic_swap) {
            nir_intrinsic_set_atomic_op(instr, atomic_op);
         }

         instr->src[0] = nir_src_for_ssa(&deref->def);
         param = param->get_next();
         nir_intrinsic_set_image_dim(instr,
            (glsl_sampler_dim)type->sampler_dimensionality);
         nir_intrinsic_set_image_array(instr, type->sampler_array);

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            unsigned num_components;
            if (op == nir_intrinsic_image_deref_sparse_load) {
               const glsl_type *dest_type =
                  glsl_get_field_type(ir->return_deref->type, "texel");
               /* One extra component to hold residency code. */
               num_components = dest_type->vector_elements + 1;
            } else
               num_components = ir->return_deref->type->vector_elements;

            nir_def_init(&instr->instr, &instr->def, num_components, 32);
         }

         if (op == nir_intrinsic_image_deref_size) {
            instr->num_components = instr->def.num_components;
         } else if (op == nir_intrinsic_image_deref_load ||
                    op == nir_intrinsic_image_deref_sparse_load) {
            instr->num_components = instr->def.num_components;
            nir_intrinsic_set_dest_type(instr,
               nir_get_nir_type_for_glsl_base_type(type->sampled_type));
         } else if (op == nir_intrinsic_image_deref_store) {
            instr->num_components = 4;
            nir_intrinsic_set_src_type(instr,
               nir_get_nir_type_for_glsl_base_type(type->sampled_type));
         }

         if (op == nir_intrinsic_image_deref_size ||
             op == nir_intrinsic_image_deref_samples) {
            /* image_deref_size takes an LOD parameter which is always 0
             * coming from GLSL.
             */
            if (op == nir_intrinsic_image_deref_size)
               instr->src[1] = nir_src_for_ssa(nir_imm_int(&b, 0));
            nir_builder_instr_insert(&b, &instr->instr);
            break;
         }

         /* Set the address argument, extending the coordinate vector to four
          * components.
          */
         nir_def *src_addr =
            evaluate_rvalue((ir_rvalue *)param);
         nir_def *srcs[4];

         for (int i = 0; i < 4; i++) {
            if (i < glsl_get_sampler_coordinate_components(type))
               srcs[i] = nir_channel(&b, src_addr, i);
            else
               srcs[i] = nir_undef(&b, 1, 32);
         }

         instr->src[1] = nir_src_for_ssa(nir_vec(&b, srcs, 4));
         param = param->get_next();

         /* Set the sample argument, which is undefined for single-sample
          * images.
          */
         if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else {
            instr->src[2] = nir_src_for_ssa(nir_undef(&b, 1, 32));
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[3] =
               nir_src_for_ssa(evaluate_rvalue((ir_rvalue *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_load ||
                    op == nir_intrinsic_image_deref_sparse_load) {
            instr->src[3] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         if (!param->is_tail_sentinel()) {
            instr->src[4] =
               nir_src_for_ssa(evaluate_rvalue((ir_rvalue *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_store) {
            instr->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
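
      /* subgroupBarrier() is both an execution and a memory barrier, so it is
       * emitted directly; the remaining built-ins map onto pure memory
       * barriers below.
       */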
      case nir_intrinsic_barrier: {
         /* The nir_intrinsic_barrier follows the general
          * semantics of SPIR-V memory barriers, so this and other memory
          * barriers use the mapping based on GLSL->SPIR-V from
          *
          * https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_gl_spirv.txt
          */
         if (ir->callee->intrinsic_id == ir_intrinsic_subgroup_barrier) {
            nir_barrier(&b, SCOPE_SUBGROUP, SCOPE_SUBGROUP, NIR_MEMORY_ACQ_REL,
                        nir_var_image | nir_var_mem_ssbo | nir_var_mem_shared | nir_var_mem_global);
            break;
         }

         mesa_scope scope;
         unsigned modes;
         switch (ir->callee->intrinsic_id) {
         case ir_intrinsic_memory_barrier:
            scope = SCOPE_DEVICE;
            modes = nir_var_image |
                    nir_var_mem_ssbo |
                    nir_var_mem_shared |
                    nir_var_mem_global;
            break;
         case ir_intrinsic_memory_barrier_buffer:
            scope = SCOPE_DEVICE;
            modes = nir_var_mem_ssbo |
                    nir_var_mem_global;
            break;
         case ir_intrinsic_memory_barrier_image:
            scope = SCOPE_DEVICE;
            modes = nir_var_image;
            break;
         case ir_intrinsic_memory_barrier_shared:
            /* Both ARB_gl_spirv and glslang lower this to Device scope, so
             * follow their lead. Note GL_KHR_vulkan_glsl also does
             * something similar.
             */
            scope = SCOPE_DEVICE;
            modes = nir_var_mem_shared;
            break;
         case ir_intrinsic_group_memory_barrier:
            scope = SCOPE_WORKGROUP;
            modes = nir_var_image |
                    nir_var_mem_ssbo |
                    nir_var_mem_shared |
                    nir_var_mem_global;
            break;
         case ir_intrinsic_memory_barrier_atomic_counter:
            /* There's no nir_var_atomic_counter, but since atomic counters are lowered
             * to SSBOs, we use nir_var_mem_ssbo instead.
             */
            scope = SCOPE_DEVICE;
            modes = nir_var_mem_ssbo;
            break;
         case ir_intrinsic_subgroup_memory_barrier:
            scope = SCOPE_SUBGROUP;
            modes = nir_var_image |
                    nir_var_mem_ssbo |
                    nir_var_mem_shared |
                    nir_var_mem_global;
            break;
         case ir_intrinsic_subgroup_memory_barrier_buffer:
            scope = SCOPE_SUBGROUP;
            modes = nir_var_mem_ssbo |
                    nir_var_mem_global;
            break;
         case ir_intrinsic_subgroup_memory_barrier_shared:
            scope = SCOPE_SUBGROUP;
            modes = nir_var_mem_shared;
            break;
         case ir_intrinsic_subgroup_memory_barrier_image:
            scope = SCOPE_SUBGROUP;
            modes = nir_var_image;
            break;
         default:
            unreachable("invalid intrinsic id for memory barrier");
         }

         nir_scoped_memory_barrier(&b, scope, NIR_MEMORY_ACQ_REL,
                                   (nir_variable_mode)modes);
         break;
      }
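
      /* The SSBO store built-in passes (block, offset, value, write_mask);
       * boolean values are widened to 32 bits before being stored.
       */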
      case nir_intrinsic_store_ssbo: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
         assert(write_mask);

         nir_def *nir_val = evaluate_rvalue(val);
         if (glsl_type_is_boolean(val->type))
            nir_val = nir_b2i32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
         instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
         intrinsic_set_std430_align(instr, val->type);
         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
         instr->num_components = val->type->vector_elements;

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
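
      /* Shared-memory built-ins: values live in memory as 32-bit data, so
       * booleans are converted on the way in (b2b32) and out (b2b1).
       */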
      case nir_intrinsic_load_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         nir_intrinsic_set_base(instr, 0);
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));

         const glsl_type *type = ir->return_deref->var->type;
         instr->num_components = type->vector_elements;
         intrinsic_set_std430_align(instr, type);

         /* Setup destination register */
         unsigned bit_size = glsl_type_is_boolean(type) ? 32 : glsl_get_bit_size(type);
         nir_def_init(&instr->instr, &instr->def, type->vector_elements,
                      bit_size);

         nir_builder_instr_insert(&b, &instr->instr);

         /* The value in shared memory is a 32-bit value */
         if (glsl_type_is_boolean(type))
            ret = nir_b2b1(&b, &instr->def);
         break;
      }
      case nir_intrinsic_store_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
         assert(write_mask);

         nir_intrinsic_set_base(instr, 0);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));

         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);

         nir_def *nir_val = evaluate_rvalue(val);
         /* The value in shared memory is a 32-bit value */
         if (glsl_type_is_boolean(val->type))
            nir_val = nir_b2b32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->num_components = val->type->vector_elements;
         intrinsic_set_std430_align(instr, val->type);

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
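
      /* Subgroup reduce/scan built-ins: the NIR reduction opcode is derived
       * from the GLSL intrinsic id and the operand type.
       */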
      case nir_intrinsic_reduce:
      case nir_intrinsic_inclusive_scan:
      case nir_intrinsic_exclusive_scan: {
         const glsl_type *type = ir->return_deref->type;
         nir_def_init(&instr->instr, &instr->def, glsl_get_vector_elements(type),
                      glsl_get_bit_size(type));
         instr->num_components = instr->def.num_components;

         ir_rvalue *value = (ir_rvalue *)ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_intrinsic_set_reduction_op(instr, get_reduction_op(ir->callee->intrinsic_id, type));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
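
      /* Remaining vote/ballot/shuffle-style intrinsics: take the destination
       * size from the return deref and the source sizes from the evaluated
       * arguments whenever the intrinsic does not fix them itself.
       */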
      case nir_intrinsic_shader_clock:
         nir_intrinsic_set_memory_scope(instr, SCOPE_SUBGROUP);
         FALLTHROUGH;
      case nir_intrinsic_begin_invocation_interlock:
      case nir_intrinsic_end_invocation_interlock:
      case nir_intrinsic_vote_ieq:
      case nir_intrinsic_vote_feq:
      case nir_intrinsic_vote_any:
      case nir_intrinsic_vote_all:
      case nir_intrinsic_ballot:
      case nir_intrinsic_read_invocation:
      case nir_intrinsic_read_first_invocation:
      case nir_intrinsic_is_helper_invocation:
      case nir_intrinsic_is_sparse_texels_resident:
      case nir_intrinsic_elect:
      case nir_intrinsic_inverse_ballot:
      case nir_intrinsic_ballot_bitfield_extract:
      case nir_intrinsic_ballot_bit_count_reduce:
      case nir_intrinsic_ballot_bit_count_inclusive:
      case nir_intrinsic_ballot_bit_count_exclusive:
      case nir_intrinsic_ballot_find_lsb:
      case nir_intrinsic_ballot_find_msb:
      case nir_intrinsic_shuffle:
      case nir_intrinsic_shuffle_xor:
      case nir_intrinsic_shuffle_up:
      case nir_intrinsic_shuffle_down: {
         if (ir->return_deref) {
            const glsl_type *type = ir->return_deref->type;
            nir_def_init(&instr->instr, &instr->def, glsl_get_vector_elements(type),
                         glsl_get_bit_size(type));

            if (!nir_intrinsic_dest_components(instr))
               instr->num_components = instr->def.num_components;
         }

         unsigned index = 0;
         foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
            instr->src[index] = nir_src_for_ssa(evaluate_rvalue(param));

            if (!nir_intrinsic_src_components(instr, index))
               instr->num_components = nir_src_num_components(instr->src[index]);

            index++;
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      default:
         unreachable("not reached");
      }
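
      /* If the built-in returns a value, store the intrinsic result back
       * through the GLSL return deref.
       */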
      if (ir->return_deref) {
         nir_deref_instr *ret_deref = evaluate_deref(ir->return_deref);

         if (op == nir_intrinsic_image_deref_sparse_load)
            adjust_sparse_variable(ret_deref, ir->return_deref->type, ret);

         nir_store_deref(&b, ret_deref, ret, ~0);
      }

      return;
   }

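   /* Not an intrinsic: emit a real nir_call. Parameters are passed by deref;
    * in/inout arguments are copied into temporaries before the call and
    * out/inout arguments are copied back afterwards.
    */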
   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir->callee);
   assert(entry);
   nir_function *callee = (nir_function *) entry->data;

   nir_call_instr *call = nir_call_instr_create(this->shader, callee);

   unsigned i = 0;
   nir_deref_instr *ret_deref = NULL;
   if (ir->return_deref) {
      nir_variable *ret_tmp =
         nir_local_variable_create(this->impl, ir->return_deref->type,
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b, ret_tmp);
      call->params[i++] = nir_src_for_ssa(&ret_deref->def);
   }

   foreach_two_lists(formal_node, &ir->callee->parameters,
                     actual_node, &ir->actual_parameters) {
      ir_rvalue *param_rvalue = (ir_rvalue *) actual_node;
      ir_variable *sig_param = (ir_variable *) formal_node;

      nir_deref_instr *param_deref;
      if (sig_param->data.mode == ir_var_function_in &&
          glsl_contains_opaque(sig_param->type)) {
         param_deref = evaluate_deref(param_rvalue);
      } else {
         nir_variable *param =
            nir_local_variable_create(this->impl, sig_param->type, "param");
         param->data.precision = sig_param->data.precision;
         param_deref = nir_build_deref_var(&b, param);

         if (sig_param->data.mode == ir_var_function_in ||
             sig_param->data.mode == ir_var_function_inout) {
            if (glsl_type_is_vector_or_scalar(param->type)) {
               nir_store_deref(&b, param_deref,
                               evaluate_rvalue(param_rvalue),
                               ~0);
            } else {
               nir_copy_deref(&b, param_deref, evaluate_deref(param_rvalue));
            }
         }
      }

      call->params[i] = nir_src_for_ssa(&param_deref->def);

      i++;
   }

   nir_builder_instr_insert(&b, &call->instr);

   /* Copy out params. We must do this after the function call to ensure we
    * do not overwrite global variables prematurely.
    */
   i = ir->return_deref ? 1 : 0;
   foreach_two_lists(formal_node, &ir->callee->parameters,
                     actual_node, &ir->actual_parameters) {
      ir_rvalue *param_rvalue = (ir_rvalue *) actual_node;
      ir_variable *sig_param = (ir_variable *) formal_node;

      if (sig_param->data.mode == ir_var_function_out ||
          sig_param->data.mode == ir_var_function_inout) {
         if (glsl_type_is_vector_or_scalar(sig_param->type)) {
            nir_store_deref(&b, evaluate_deref(param_rvalue),
                            nir_load_deref(&b, nir_src_as_deref(call->params[i])),
                            ~0);
         } else {
            nir_copy_deref(&b, evaluate_deref(param_rvalue),
                           nir_src_as_deref(call->params[i]));
         }
      }

      i++;
   }

   if (ir->return_deref) {
      if (glsl_type_is_vector_or_scalar(ir->return_deref->type)) {
         nir_store_deref(&b, evaluate_deref(ir->return_deref),
                         nir_load_deref(&b, ret_deref), ~0);
      } else {
         nir_copy_deref(&b, evaluate_deref(ir->return_deref), ret_deref);
      }
   }
}

void
nir_visitor::visit(ir_assignment *ir)
{
   unsigned num_components = ir->lhs->type->vector_elements;
   unsigned write_mask = ir->write_mask;

   b.exact = ir->lhs->variable_referenced()->data.invariant ||
             ir->lhs->variable_referenced()->data.precise;

   if ((ir->rhs->as_dereference() || ir->rhs->as_constant()) &&
       (write_mask == BITFIELD_MASK(num_components) || write_mask == 0)) {
      nir_deref_instr *lhs = evaluate_deref(ir->lhs);
      nir_deref_instr *rhs = evaluate_deref(ir->rhs);
      enum gl_access_qualifier lhs_qualifiers = deref_get_qualifier(lhs);
      enum gl_access_qualifier rhs_qualifiers = deref_get_qualifier(rhs);

      nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                 rhs_qualifiers);
      return;
   }

   ir_texture *tex = ir->rhs->as_texture();
   bool is_sparse = tex && tex->is_sparse;

   if (!is_sparse)
      assert(glsl_type_is_scalar(ir->rhs->type) || glsl_type_is_vector(ir->rhs->type));

   ir->lhs->accept(this);
   nir_deref_instr *lhs_deref = this->deref;
   nir_def *src = evaluate_rvalue(ir->rhs);

   if (is_sparse) {
      adjust_sparse_variable(lhs_deref, tex->type, src);

      /* correct component and mask because they are 0 for struct */
      num_components = src->num_components;
      write_mask = BITFIELD_MASK(num_components);
   }

   if (write_mask != BITFIELD_MASK(num_components) && write_mask != 0) {
      /* GLSL IR will give us the input to the write-masked assignment in a
       * single packed vector. So, for example, if the writemask is xzw, then
       * we have to swizzle x -> x, y -> z, and z -> w and get the y component
       * from the load.
       */
      unsigned swiz[4];
      unsigned component = 0;
      for (unsigned i = 0; i < 4; i++) {
         swiz[i] = write_mask & (1 << i) ? component++ : 0;
      }
      src = nir_swizzle(&b, src, swiz, num_components);
   }

   enum gl_access_qualifier qualifiers = deref_get_qualifier(lhs_deref);

   nir_store_deref_with_access(&b, lhs_deref, src, write_mask,
                               qualifiers);
}

/*
 * Given an instruction, returns a pointer to its destination or NULL if there
 * is no destination.
 *
 * Note that this only handles instructions we generate at this level.
 */
static nir_def *
get_instr_def(nir_instr *instr)
{
   nir_alu_instr *alu_instr;
   nir_intrinsic_instr *intrinsic_instr;
   nir_tex_instr *tex_instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      alu_instr = nir_instr_as_alu(instr);
      return &alu_instr->def;

   case nir_instr_type_intrinsic:
      intrinsic_instr = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest)
         return &intrinsic_instr->def;
      else
         return NULL;

   case nir_instr_type_tex:
      tex_instr = nir_instr_as_tex(instr);
      return &tex_instr->def;

   default:
      unreachable("not reached");
   }

   return NULL;
}

void
nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
                       unsigned bit_size)
{
   nir_def *def = get_instr_def(instr);

   if (def)
      nir_def_init(instr, def, num_components, bit_size);

   nir_builder_instr_insert(&b, instr);

   if (def)
      this->result = def;
}

nir_def *
nir_visitor::evaluate_rvalue(ir_rvalue* ir)
{
   ir->accept(this);
   if (ir->as_dereference() || ir->as_constant()) {
      /*
       * A dereference is being used on the right hand side, which means we
       * must emit a variable load.
       */

      enum gl_access_qualifier access = deref_get_qualifier(this->deref);
      this->result = nir_load_deref_with_access(&b, this->deref, access);
   }

   return this->result;
}

static bool
type_is_float(glsl_base_type type)
{
   return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE ||
          type == GLSL_TYPE_FLOAT16;
}

static bool
type_is_signed(glsl_base_type type)
{
   return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64 ||
          type == GLSL_TYPE_INT16;
}

void
nir_visitor::visit(ir_expression *ir)
{
   /* Some special cases */
   switch (ir->operation) {
   case ir_unop_interpolate_at_centroid:
   case ir_binop_interpolate_at_offset:
   case ir_binop_interpolate_at_sample: {
      ir_dereference *deref = ir->operands[0]->as_dereference();
      ir_swizzle *swizzle = NULL;
      ir_expression *precision_op = NULL;
      if (!deref) {
         precision_op = ir->operands[0]->as_expression();
         if (precision_op) {
            /* For some builtins precision is lowered to mediump for certain
             * parameters that ignore precision. For example for Interpolation
             * and Bitfield functions.
             */
            assert(precision_op->operation == ir_unop_f2fmp);
            deref = precision_op->operands[0]->as_dereference();
         }

         if (!deref) {
            swizzle = ir->operands[0]->as_swizzle();
            assert(swizzle);
            deref = swizzle->val->as_dereference();
         }

         assert(deref);
      }

      deref->accept(this);

      assert(nir_deref_mode_is(this->deref, nir_var_shader_in));
      nir_intrinsic_op op;
      switch (ir->operation) {
      case ir_unop_interpolate_at_centroid:
         op = nir_intrinsic_interp_deref_at_centroid;
         break;
      case ir_binop_interpolate_at_offset:
         op = nir_intrinsic_interp_deref_at_offset;
         break;
      case ir_binop_interpolate_at_sample:
         op = nir_intrinsic_interp_deref_at_sample;
         break;
      default:
         unreachable("Invalid interpolation intrinsic");
      }

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op);
      intrin->num_components = deref->type->vector_elements;
      intrin->src[0] = nir_src_for_ssa(&this->deref->def);

      if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intrin->intrinsic == nir_intrinsic_interp_deref_at_sample)
         intrin->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1]));

      unsigned bit_size = glsl_get_bit_size(deref->type);
      add_instr(&intrin->instr, deref->type->vector_elements, bit_size);

      if (swizzle) {
         unsigned swiz[4] = {
            swizzle->mask.x, swizzle->mask.y, swizzle->mask.z, swizzle->mask.w
         };

         result = nir_swizzle(&b, result, swiz,
                              swizzle->type->vector_elements);
      }

      if (precision_op) {
         result = nir_build_alu(&b, nir_op_f2fmp, result, NULL, NULL, NULL);
      }

      return;
   }

   case ir_unop_ssbo_unsized_array_length: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b.shader,
                                    nir_intrinsic_deref_buffer_array_length);

      ir_dereference *deref = ir->operands[0]->as_dereference();
      intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->def);

      add_instr(&intrin->instr, 1, 32);
      return;
   }

   default:
      break;
   }

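   /* General case: evaluate every operand to an SSA value and translate the
    * GLSL IR operation to the corresponding NIR ALU instruction(s).
    */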
   nir_def *srcs[4];
   for (unsigned i = 0; i < ir->num_operands; i++)
      srcs[i] = evaluate_rvalue(ir->operands[i]);

   glsl_base_type types[4];
   for (unsigned i = 0; i < ir->num_operands; i++)
      types[i] = ir->operands[i]->type->base_type;

   glsl_base_type out_type = ir->type->base_type;

   switch (ir->operation) {
case ir_unop_bit_not: result = nir_inot(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_logic_not:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_inot(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_neg:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fneg(&b, srcs[0])
|
|
|
|
|
: nir_ineg(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_abs:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fabs(&b, srcs[0])
|
|
|
|
|
: nir_iabs(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_unop_clz:
|
|
|
|
|
result = nir_uclz(&b, srcs[0]);
|
|
|
|
|
break;
|
2014-10-15 16:19:26 -07:00
|
|
|
case ir_unop_saturate:
|
2015-11-12 11:18:50 +01:00
|
|
|
assert(type_is_float(types[0]));
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fsat(&b, srcs[0]);
|
2014-10-15 16:19:26 -07:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_sign:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(types[0]) ? nir_fsign(&b, srcs[0])
|
|
|
|
|
: nir_isign(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_rcp: result = nir_frcp(&b, srcs[0]); break;
|
2023-03-22 13:03:05 -07:00
|
|
|
|
|
|
|
|
case ir_unop_rsq:
|
|
|
|
|
if (consts->ForceGLSLAbsSqrt)
|
|
|
|
|
srcs[0] = nir_fabs(&b, srcs[0]);
|
|
|
|
|
result = nir_frsq(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_unop_sqrt:
|
|
|
|
|
if (consts->ForceGLSLAbsSqrt)
|
|
|
|
|
srcs[0] = nir_fabs(&b, srcs[0]);
|
|
|
|
|
result = nir_fsqrt(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2022-05-31 13:36:15 -07:00
|
|
|
case ir_unop_exp: result = nir_fexp2(&b, nir_fmul_imm(&b, srcs[0], M_LOG2E)); break;
|
|
|
|
|
case ir_unop_log: result = nir_fmul_imm(&b, nir_flog2(&b, srcs[0]), 1.0 / M_LOG2E); break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_exp2: result = nir_fexp2(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_log2: result = nir_flog2(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_i2f:
|
|
|
|
|
case ir_unop_u2f:
|
|
|
|
|
case ir_unop_b2f:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_f2i:
|
|
|
|
|
case ir_unop_f2u:
|
|
|
|
|
case ir_unop_f2b:
|
|
|
|
|
case ir_unop_i2b:
|
|
|
|
|
case ir_unop_b2i:
|
|
|
|
|
case ir_unop_b2i64:
|
|
|
|
|
case ir_unop_d2f:
|
|
|
|
|
case ir_unop_f2d:
|
2022-06-27 15:16:41 +10:00
|
|
|
case ir_unop_f162u:
|
|
|
|
|
case ir_unop_u2f16:
|
|
|
|
|
case ir_unop_f162i:
|
|
|
|
|
case ir_unop_i2f16:
|
2019-04-19 15:36:00 +02:00
|
|
|
case ir_unop_f162f:
|
|
|
|
|
case ir_unop_f2f16:
|
2019-05-16 13:25:28 +02:00
|
|
|
case ir_unop_f162b:
|
|
|
|
|
case ir_unop_b2f16:
|
2022-06-27 15:16:41 +10:00
|
|
|
case ir_unop_f162d:
|
|
|
|
|
case ir_unop_d2f16:
|
|
|
|
|
case ir_unop_f162u64:
|
|
|
|
|
case ir_unop_u642f16:
|
|
|
|
|
case ir_unop_f162i64:
|
|
|
|
|
case ir_unop_i642f16:
|
2020-05-08 22:16:42 -04:00
|
|
|
case ir_unop_i2i:
|
|
|
|
|
case ir_unop_u2u:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_d2i:
|
|
|
|
|
case ir_unop_d2u:
|
|
|
|
|
case ir_unop_d2b:
|
2016-01-12 14:03:08 +01:00
|
|
|
case ir_unop_i2d:
|
|
|
|
|
case ir_unop_u2d:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_i642i:
|
|
|
|
|
case ir_unop_i642u:
|
|
|
|
|
case ir_unop_i642f:
|
|
|
|
|
case ir_unop_i642b:
|
|
|
|
|
case ir_unop_i642d:
|
|
|
|
|
case ir_unop_u642i:
|
|
|
|
|
case ir_unop_u642u:
|
|
|
|
|
case ir_unop_u642f:
|
|
|
|
|
case ir_unop_u642d:
|
|
|
|
|
case ir_unop_i2i64:
|
|
|
|
|
case ir_unop_u2i64:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_f2i64:
|
|
|
|
|
case ir_unop_d2i64:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_i2u64:
|
|
|
|
|
case ir_unop_u2u64:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_f2u64:
|
|
|
|
|
case ir_unop_d2u64:
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_i2u:
|
|
|
|
|
case ir_unop_u2i:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_i642u64:
|
2017-03-07 16:46:44 -08:00
|
|
|
case ir_unop_u642i64: {
|
|
|
|
|
nir_alu_type src_type = nir_get_nir_type_for_glsl_base_type(types[0]);
|
|
|
|
|
nir_alu_type dst_type = nir_get_nir_type_for_glsl_base_type(out_type);
|
2022-11-01 18:11:54 -07:00
|
|
|
result = nir_type_convert(&b, srcs[0], src_type, dst_type,
|
|
|
|
|
nir_rounding_mode_undef);
|
2017-03-07 19:54:37 -08:00
|
|
|
/* b2i and b2f don't have fixed bit-size versions so the builder will
|
|
|
|
|
* just assume 32 and we have to fix it up here.
|
|
|
|
|
*/
|
|
|
|
|
result->bit_size = nir_alu_type_get_type_size(dst_type);
|
2017-03-07 16:46:44 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-09 13:07:40 +02:00
|
|
|
case ir_unop_f2fmp: {
|
|
|
|
|
result = nir_build_alu(&b, nir_op_f2fmp, srcs[0], NULL, NULL, NULL);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-08 22:16:42 -04:00
|
|
|
case ir_unop_i2imp: {
|
|
|
|
|
result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case ir_unop_u2ump: {
|
2020-09-04 01:51:49 -04:00
|
|
|
result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
|
2020-05-08 22:16:42 -04:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_bitcast_i2f:
|
|
|
|
|
case ir_unop_bitcast_f2i:
|
|
|
|
|
case ir_unop_bitcast_u2f:
|
|
|
|
|
case ir_unop_bitcast_f2u:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_bitcast_i642d:
|
|
|
|
|
case ir_unop_bitcast_d2i64:
|
|
|
|
|
case ir_unop_bitcast_u642d:
|
|
|
|
|
case ir_unop_bitcast_d2u64:
|
2015-07-21 14:22:11 +10:00
|
|
|
case ir_unop_subroutine_to_int:
|
2014-07-10 18:18:17 -07:00
|
|
|
/* no-op */
|
2019-05-06 11:45:46 -05:00
|
|
|
result = nir_mov(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_trunc: result = nir_ftrunc(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_ceil: result = nir_fceil(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_fract: result = nir_ffract(&b, srcs[0]); break;
|
2018-03-20 13:06:23 +11:00
|
|
|
case ir_unop_frexp_exp: result = nir_frexp_exp(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_frexp_sig: result = nir_frexp_sig(&b, srcs[0]); break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_unop_round_even: result = nir_fround_even(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_sin: result = nir_fsin(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_cos: result = nir_fcos(&b, srcs[0]); break;
|
2024-07-23 12:22:19 -04:00
|
|
|
case ir_unop_dFdx: result = nir_ddx(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy: result = nir_ddy(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdx_fine: result = nir_ddx_fine(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy_fine: result = nir_ddy_fine(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdx_coarse: result = nir_ddx_coarse(&b, srcs[0]); break;
|
|
|
|
|
case ir_unop_dFdy_coarse: result = nir_ddy_coarse(&b, srcs[0]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_pack_snorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_snorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_snorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_snorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_unorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_unorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_unorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_unorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_pack_half_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_pack_half_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_snorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_snorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_snorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_snorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_unorm_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_unorm_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_unorm_4x8:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_unorm_4x8(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_unop_unpack_half_2x16:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_unpack_half_2x16(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2018-01-19 15:11:16 +11:00
|
|
|
case ir_unop_pack_sampler_2x32:
|
|
|
|
|
case ir_unop_pack_image_2x32:
|
2015-08-14 12:20:37 -07:00
|
|
|
case ir_unop_pack_double_2x32:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_pack_int_2x32:
|
|
|
|
|
case ir_unop_pack_uint_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
result = nir_pack_64_2x32(&b, srcs[0]);
|
2016-09-01 15:21:04 -07:00
|
|
|
break;
|
2018-01-19 15:11:16 +11:00
|
|
|
case ir_unop_unpack_sampler_2x32:
|
|
|
|
|
case ir_unop_unpack_image_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
case ir_unop_unpack_double_2x32:
|
2016-09-01 15:21:04 -07:00
|
|
|
case ir_unop_unpack_int_2x32:
|
|
|
|
|
case ir_unop_unpack_uint_2x32:
|
2017-02-14 22:15:16 -08:00
|
|
|
result = nir_unpack_64_2x32(&b, srcs[0]);
|
2016-09-01 15:21:04 -07:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_unop_bitfield_reverse:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_bitfield_reverse(&b, srcs[0]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2014-11-07 10:59:16 -08:00
|
|
|
case ir_unop_bit_count:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_bit_count(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
case ir_unop_find_msb:
|
|
|
|
|
switch (types[0]) {
|
|
|
|
|
case GLSL_TYPE_UINT:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_ufind_msb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
case GLSL_TYPE_INT:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_ifind_msb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid type for findMSB()");
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case ir_unop_find_lsb:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_find_lsb(&b, srcs[0]);
|
2014-11-07 10:59:16 -08:00
|
|
|
break;
|
|
|
|
|
|
2015-06-01 09:44:55 +02:00
|
|
|
case ir_unop_get_buffer_size: {
|
|
|
|
|
nir_intrinsic_instr *load = nir_intrinsic_instr_create(
|
|
|
|
|
this->shader,
|
2020-09-22 03:24:45 -05:00
|
|
|
nir_intrinsic_get_ssbo_size);
|
2015-06-01 09:44:55 +02:00
|
|
|
load->num_components = ir->type->vector_elements;
|
2015-10-30 23:32:50 -04:00
|
|
|
load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0]));
|
2016-05-11 15:05:09 -04:00
|
|
|
unsigned bit_size = glsl_get_bit_size(ir->type);
|
2016-04-04 10:16:11 +02:00
|
|
|
add_instr(&load->instr, ir->type->vector_elements, bit_size);
|
2015-06-01 09:44:55 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-11 16:02:25 +02:00
|
|
|
case ir_unop_atan:
|
|
|
|
|
result = nir_atan(&b, srcs[0]);
|
|
|
|
|
break;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_add:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_iadd(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_binop_add_sat:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_iadd_sat(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uadd_sat(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_sub:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fsub(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_isub(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2018-09-12 17:05:14 -07:00
|
|
|
case ir_binop_sub_sat:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_isub_sat(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_usub_sat(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_abs_sub:
|
|
|
|
|
/* out_type is always unsigned for ir_binop_abs_sub, so we have to key
|
|
|
|
|
* on the type of the sources.
|
|
|
|
|
*/
|
|
|
|
|
result = type_is_signed(types[0]) ? nir_uabs_isub(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uabs_usub(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_avg:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_ihadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_uhadd(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_avg_round:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_irhadd(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_urhadd(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_mul_32x16:
|
|
|
|
|
result = type_is_signed(out_type) ? nir_imul_32x16(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umul_32x16(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_mul:
|
2019-02-27 14:02:54 -08:00
|
|
|
if (type_is_float(out_type))
|
|
|
|
|
result = nir_fmul(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (out_type == GLSL_TYPE_INT64 &&
|
|
|
|
|
(ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
|
|
|
|
|
ir->operands[1]->type->base_type == GLSL_TYPE_INT))
|
|
|
|
|
result = nir_imul_2x32_64(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (out_type == GLSL_TYPE_UINT64 &&
|
|
|
|
|
(ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
|
|
|
|
|
ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
|
|
|
|
|
result = nir_umul_2x32_64(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_imul(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_div:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fdiv(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_idiv(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_udiv(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_mod:
|
2015-11-12 11:18:50 +01:00
|
|
|
result = type_is_float(out_type) ? nir_fmod(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umod(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_min:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fmin(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_imin(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_umin(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_max:
|
2015-11-12 11:18:50 +01:00
|
|
|
if (type_is_float(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_fmax(&b, srcs[0], srcs[1]);
|
2016-09-02 18:46:55 -07:00
|
|
|
else if (type_is_signed(out_type))
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_imax(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_umax(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_pow: result = nir_fpow(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_and: result = nir_iand(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_or: result = nir_ior(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_bit_xor: result = nir_ixor(&b, srcs[0], srcs[1]); break;
|
2015-03-23 16:04:41 -07:00
|
|
|
case ir_binop_logic_and:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_iand(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2015-03-23 16:04:41 -07:00
|
|
|
case ir_binop_logic_or:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_ior(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2015-11-14 17:47:33 -08:00
|
|
|
case ir_binop_logic_xor:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_ixor(&b, srcs[0], srcs[1]);
|
2015-10-31 16:31:59 -04:00
|
|
|
break;
|
2020-09-03 09:27:58 -04:00
|
|
|
case ir_binop_lshift: result = nir_ishl(&b, srcs[0], nir_u2u32(&b, srcs[1])); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_rshift:
|
2020-09-03 09:27:58 -04:00
|
|
|
result = (type_is_signed(out_type)) ? nir_ishr(&b, srcs[0], nir_u2u32(&b, srcs[1]))
|
|
|
|
|
: nir_ushr(&b, srcs[0], nir_u2u32(&b, srcs[1]));
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_imul_high:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = (out_type == GLSL_TYPE_INT) ? nir_imul_high(&b, srcs[0], srcs[1])
|
|
|
|
|
: nir_umul_high(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_binop_carry: result = nir_uadd_carry(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_binop_borrow: result = nir_usub_borrow(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_binop_less:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_flt(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (type_is_signed(types[0]))
|
|
|
|
|
result = nir_ilt(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_ult(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_gequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_fge(&b, srcs[0], srcs[1]);
|
|
|
|
|
else if (type_is_signed(types[0]))
|
|
|
|
|
result = nir_ige(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_uge(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_equal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
|
|
|
|
result = nir_feq(&b, srcs[0], srcs[1]);
|
|
|
|
|
else
|
|
|
|
|
result = nir_ieq(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_nequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0]))
|
2020-08-18 19:51:57 +02:00
|
|
|
result = nir_fneu(&b, srcs[0], srcs[1]);
|
2018-12-18 23:31:30 -05:00
|
|
|
else
|
|
|
|
|
result = nir_ine(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_binop_all_equal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0])) {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
|
|
|
|
case 1: result = nir_feq(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_ball_fequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_ball_fequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_ball_fequal4(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2018-12-18 23:31:30 -05:00
|
|
|
case 1: result = nir_ieq(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_ball_iequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_ball_iequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_ball_iequal4(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case ir_binop_any_nequal:
|
2018-12-18 23:31:30 -05:00
|
|
|
if (type_is_float(types[0])) {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2020-08-18 19:51:57 +02:00
|
|
|
case 1: result = nir_fneu(&b, srcs[0], srcs[1]); break;
|
2018-12-18 23:31:30 -05:00
|
|
|
case 2: result = nir_bany_fnequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_bany_fnequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_bany_fnequal4(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
switch (ir->operands[0]->type->vector_elements) {
|
2018-12-18 23:31:30 -05:00
|
|
|
case 1: result = nir_ine(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 2: result = nir_bany_inequal2(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 3: result = nir_bany_inequal3(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case 4: result = nir_bany_inequal4(&b, srcs[0], srcs[1]); break;
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
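   /* all_equal/any_nequal compare whole vectors and reduce to a single
    * boolean, so the nir_ball_*equal/nir_bany_*nequal helper is picked by the
    * operand's vector_elements.
    */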
|
|
|
|
|
case ir_binop_dot:
|
2021-01-13 15:11:57 +00:00
|
|
|
result = nir_fdot(&b, srcs[0], srcs[1]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
2023-02-22 13:47:37 -08:00
|
|
|
|
2022-01-14 13:44:31 -08:00
|
|
|
case ir_binop_vector_extract:
|
|
|
|
|
result = nir_vector_extract(&b, srcs[0], srcs[1]);
|
2018-05-09 15:17:59 +00:00
|
|
|
break;
|
2023-02-22 13:47:37 -08:00
|
|
|
case ir_triop_vector_insert:
|
|
|
|
|
result = nir_vector_insert(&b, srcs[0], srcs[1], srcs[2]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2019-10-11 16:02:25 +02:00
|
|
|
case ir_binop_atan2:
|
|
|
|
|
result = nir_atan2(&b, srcs[0], srcs[1]);
|
|
|
|
|
break;
|
|
|
|
|
|
2015-10-31 16:31:59 -04:00
|
|
|
case ir_binop_ldexp: result = nir_ldexp(&b, srcs[0], srcs[1]); break;
|
|
|
|
|
case ir_triop_fma:
|
|
|
|
|
result = nir_ffma(&b, srcs[0], srcs[1], srcs[2]);
|
|
|
|
|
break;
|
2014-07-10 18:18:17 -07:00
|
|
|
case ir_triop_lrp:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_flrp(&b, srcs[0], srcs[1], srcs[2]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_triop_csel:
|
2018-12-18 23:31:30 -05:00
|
|
|
result = nir_bcsel(&b, srcs[0], srcs[1], srcs[2]);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_triop_bitfield_extract:
|
2023-12-14 22:21:26 -08:00
|
|
|
result = glsl_type_is_int_16_32(ir->type) ?
|
2020-07-18 10:02:11 -07:00
|
|
|
nir_ibitfield_extract(&b, nir_i2i32(&b, srcs[0]), nir_i2i32(&b, srcs[1]), nir_i2i32(&b, srcs[2])) :
|
|
|
|
|
nir_ubitfield_extract(&b, nir_u2u32(&b, srcs[0]), nir_i2i32(&b, srcs[1]), nir_i2i32(&b, srcs[2]));
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
if (ir->type->base_type == GLSL_TYPE_INT16) {
|
|
|
|
|
result = nir_i2i16(&b, result);
|
|
|
|
|
} else if (ir->type->base_type == GLSL_TYPE_UINT16) {
|
|
|
|
|
result = nir_u2u16(&b, result);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
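   /* The bitfield_extract opcodes used here (and bitfield_insert below)
    * operate on 32-bit values, so 16-bit sources are widened first and the
    * result is narrowed back to the original 16-bit type.
    */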
|
|
|
|
|
case ir_quadop_bitfield_insert:
|
2020-07-18 10:02:11 -07:00
|
|
|
result = nir_bitfield_insert(&b,
|
|
|
|
|
nir_u2u32(&b, srcs[0]), nir_u2u32(&b, srcs[1]),
|
|
|
|
|
nir_i2i32(&b, srcs[2]), nir_i2i32(&b, srcs[3]));
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
if (ir->type->base_type == GLSL_TYPE_INT16) {
|
|
|
|
|
result = nir_i2i16(&b, result);
|
|
|
|
|
} else if (ir->type->base_type == GLSL_TYPE_UINT16) {
|
|
|
|
|
result = nir_u2u16(&b, result);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
case ir_quadop_vector:
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_vec(&b, srcs, ir->type->vector_elements);
|
2014-07-10 18:18:17 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
2022-11-08 13:53:25 -08:00
|
|
|
|
|
|
|
|
/* The bit-size of the NIR SSA value must match the bit-size of the
|
|
|
|
|
* original GLSL IR expression.
|
|
|
|
|
*/
|
|
|
|
|
assert(result->bit_size == glsl_base_type_get_bit_size(ir->type->base_type));
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_swizzle *ir)
|
|
|
|
|
{
|
|
|
|
|
unsigned swizzle[4] = { ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w };
|
2015-10-31 16:31:59 -04:00
|
|
|
result = nir_swizzle(&b, evaluate_rvalue(ir->val), swizzle,
|
2019-05-06 10:23:26 -05:00
|
|
|
ir->type->vector_elements);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_texture *ir)
|
|
|
|
|
{
|
|
|
|
|
unsigned num_srcs;
|
|
|
|
|
nir_texop op;
|
|
|
|
|
switch (ir->op) {
|
|
|
|
|
case ir_tex:
|
|
|
|
|
op = nir_texop_tex;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txb:
|
|
|
|
|
case ir_txl:
|
|
|
|
|
op = (ir->op == ir_txb) ? nir_texop_txb : nir_texop_txl;
|
|
|
|
|
num_srcs = 2; /* coordinate, bias/lod */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txd:
|
|
|
|
|
op = nir_texop_txd; /* coordinate, dPdx, dPdy */
|
|
|
|
|
num_srcs = 3;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf:
|
|
|
|
|
op = nir_texop_txf;
|
|
|
|
|
if (ir->lod_info.lod != NULL)
|
|
|
|
|
num_srcs = 2; /* coordinate, lod */
|
|
|
|
|
else
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf_ms:
|
|
|
|
|
op = nir_texop_txf_ms;
|
|
|
|
|
num_srcs = 2; /* coordinate, sample_index */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txs:
|
|
|
|
|
op = nir_texop_txs;
|
|
|
|
|
if (ir->lod_info.lod != NULL)
|
|
|
|
|
num_srcs = 1; /* lod */
|
|
|
|
|
else
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_lod:
|
|
|
|
|
op = nir_texop_lod;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_tg4:
|
|
|
|
|
op = nir_texop_tg4;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_query_levels:
|
|
|
|
|
op = nir_texop_query_levels;
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
2015-08-27 23:05:03 -04:00
|
|
|
case ir_texture_samples:
|
|
|
|
|
op = nir_texop_texture_samples;
|
|
|
|
|
num_srcs = 0;
|
|
|
|
|
break;
|
|
|
|
|
|
2015-11-17 17:09:09 -08:00
|
|
|
case ir_samples_identical:
|
|
|
|
|
op = nir_texop_samples_identical;
|
|
|
|
|
num_srcs = 1; /* coordinate */
|
|
|
|
|
break;
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
default:
|
2015-01-21 20:22:18 -08:00
|
|
|
unreachable("not reached");
|
2014-07-10 18:18:17 -07:00
|
|
|
}
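   /* The per-op counts above only cover the mandatory sources; optional
    * projector, shadow comparator, non-gather offset and LOD-clamp sources
    * are counted below before the instruction is created.
    */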
|
|
|
|
|
|
|
|
|
|
if (ir->projector != NULL)
|
|
|
|
|
num_srcs++;
|
2016-12-12 08:32:38 -05:00
|
|
|
if (ir->shadow_comparator != NULL)
|
2014-07-10 18:18:17 -07:00
|
|
|
num_srcs++;
|
2019-03-18 21:23:59 +01:00
|
|
|
   /* Constant textureGatherOffsets() offsets are stored directly in
    * nir_tex_instr::tg4_offsets rather than as an extra source.
    */
|
2023-12-14 22:21:26 -08:00
|
|
|
if (ir->offset != NULL && !glsl_type_is_array(ir->offset->type))
|
2014-07-10 18:18:17 -07:00
|
|
|
num_srcs++;
|
2022-01-06 17:31:01 +08:00
|
|
|
if (ir->clamp != NULL)
|
|
|
|
|
num_srcs++;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
   /* Add two for the texture and sampler deref sources */
|
2018-03-19 10:53:45 -07:00
|
|
|
num_srcs += 2;
|
2018-03-15 09:58:22 -07:00
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
nir_tex_instr *instr = nir_tex_instr_create(this->shader, num_srcs);
|
|
|
|
|
|
|
|
|
|
instr->op = op;
|
|
|
|
|
instr->sampler_dim =
|
|
|
|
|
(glsl_sampler_dim) ir->sampler->type->sampler_dimensionality;
|
|
|
|
|
instr->is_array = ir->sampler->type->sampler_array;
|
|
|
|
|
instr->is_shadow = ir->sampler->type->sampler_shadow;
|
2021-12-28 13:52:36 +08:00
|
|
|
|
|
|
|
|
const glsl_type *dest_type
|
2023-12-14 22:21:26 -08:00
|
|
|
= ir->is_sparse ? glsl_get_field_type(ir->type, "texel") : ir->type;
|
2023-12-12 12:49:24 -08:00
|
|
|
assert(dest_type != &glsl_type_builtin_error);
|
2022-04-22 09:05:26 -04:00
|
|
|
if (instr->is_shadow)
|
|
|
|
|
instr->is_new_style_shadow = (dest_type->vector_elements == 1);
|
2021-12-28 13:52:36 +08:00
|
|
|
instr->dest_type = nir_get_nir_type_for_glsl_type(dest_type);
|
|
|
|
|
instr->is_sparse = ir->is_sparse;
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
nir_deref_instr *sampler_deref = evaluate_deref(ir->sampler);
|
2024-07-23 13:07:34 +10:00
|
|
|
nir_def *tex_intrin = nir_deref_texture_src(&b, 32, &sampler_deref->def);
|
2019-03-24 17:11:34 +01:00
|
|
|
|
2024-07-23 13:07:34 +10:00
|
|
|
instr->src[0] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref_intrinsic,
|
|
|
|
|
tex_intrin);
|
|
|
|
|
instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_texture_deref_intrinsic,
|
|
|
|
|
tex_intrin);
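   /* GLSL uses combined texture/sampler objects, so the texture and sampler
    * sources both point at the same deref intrinsic.
    */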
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2018-03-19 10:53:45 -07:00
|
|
|
unsigned src_number = 2;
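   /* src_number tracks the next free source slot; it must end up equal to
    * num_srcs (asserted below) once all optional sources are filled in.
    */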
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
if (ir->coordinate != NULL) {
|
|
|
|
|
instr->coord_components = ir->coordinate->type->vector_elements;
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_coord,
|
|
|
|
|
evaluate_rvalue(ir->coordinate));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ir->projector != NULL) {
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_projector,
|
|
|
|
|
evaluate_rvalue(ir->projector));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-12 08:32:38 -05:00
|
|
|
if (ir->shadow_comparator != NULL) {
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_comparator,
|
|
|
|
|
evaluate_rvalue(ir->shadow_comparator));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ir->offset != NULL) {
|
2023-12-14 22:21:26 -08:00
|
|
|
if (glsl_type_is_array(ir->offset->type)) {
|
2024-05-31 13:34:57 -07:00
|
|
|
const int size = MIN2(glsl_array_size(ir->offset->type), 4);
|
|
|
|
|
for (int i = 0; i < size; i++) {
|
2019-03-18 21:23:59 +01:00
|
|
|
const ir_constant *c =
|
|
|
|
|
ir->offset->as_constant()->get_array_element(i);
|
|
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < 2; ++j) {
|
|
|
|
|
int val = c->get_int_component(j);
|
|
|
|
|
instr->tg4_offsets[i][j] = val;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2023-12-14 22:21:26 -08:00
|
|
|
assert(glsl_type_is_vector(ir->offset->type) || glsl_type_is_scalar(ir->offset->type));
|
2014-07-10 18:18:17 -07:00
|
|
|
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_offset,
|
|
|
|
|
evaluate_rvalue(ir->offset));
|
2019-03-18 21:23:59 +01:00
|
|
|
src_number++;
|
|
|
|
|
}
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
2022-01-06 17:31:01 +08:00
|
|
|
if (ir->clamp) {
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_min_lod,
|
|
|
|
|
evaluate_rvalue(ir->clamp));
|
2022-01-06 17:31:01 +08:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
switch (ir->op) {
|
|
|
|
|
case ir_txb:
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_bias,
|
|
|
|
|
evaluate_rvalue(ir->lod_info.bias));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txl:
|
|
|
|
|
case ir_txf:
|
|
|
|
|
case ir_txs:
|
|
|
|
|
if (ir->lod_info.lod != NULL) {
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_lod,
|
|
|
|
|
evaluate_rvalue(ir->lod_info.lod));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txd:
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_ddx,
|
|
|
|
|
evaluate_rvalue(ir->lod_info.grad.dPdx));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_ddy,
|
|
|
|
|
evaluate_rvalue(ir->lod_info.grad.dPdy));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_txf_ms:
|
2023-05-31 20:56:31 -04:00
|
|
|
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_ms_index,
|
|
|
|
|
evaluate_rvalue(ir->lod_info.sample_index));
|
2014-07-10 18:18:17 -07:00
|
|
|
src_number++;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ir_tg4:
|
|
|
|
|
instr->component = ir->lod_info.component->as_constant()->value.u[0];
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert(src_number == num_srcs);
|
|
|
|
|
|
2021-12-28 13:52:36 +08:00
|
|
|
unsigned bit_size = glsl_get_bit_size(dest_type);
|
2016-04-04 10:16:11 +02:00
|
|
|
add_instr(&instr->instr, nir_tex_instr_dest_size(instr), bit_size);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_constant *ir)
|
|
|
|
|
{
|
|
|
|
|
/*
|
2016-06-23 19:20:18 +02:00
|
|
|
* We don't know if this variable is an array or struct that gets
|
2014-12-19 14:55:45 -08:00
|
|
|
* dereferenced, so do the safe thing and make it a variable with a
|
|
|
|
|
* constant initializer and return a dereference.
|
2014-07-10 18:18:17 -07:00
|
|
|
*/
|
|
|
|
|
|
2015-10-09 07:05:11 -07:00
|
|
|
nir_variable *var =
|
|
|
|
|
nir_local_variable_create(this->impl, ir->type, "const_temp");
|
2014-07-10 18:18:17 -07:00
|
|
|
var->data.read_only = true;
|
|
|
|
|
var->constant_initializer = constant_copy(ir, var);
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
this->deref = nir_build_deref_var(&b, var);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_dereference_variable *ir)
|
|
|
|
|
{
|
2023-12-19 14:09:58 +11:00
|
|
|
if (ir->variable_referenced()->data.mode == ir_var_function_out ||
|
2024-01-08 16:57:41 +11:00
|
|
|
ir->variable_referenced()->data.mode == ir_var_function_inout ||
|
|
|
|
|
ir->variable_referenced()->data.mode == ir_var_function_in) {
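      /* Function parameters are referenced through nir_load_param. The first
       * parameter slot is used for the return value when the signature
       * returns non-void, so named parameters start at index 1 in that case.
       */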
|
2023-12-12 12:49:24 -08:00
|
|
|
unsigned i = (sig->return_type != &glsl_type_builtin_void) ? 1 : 0;
|
2018-12-10 10:58:43 -08:00
|
|
|
|
|
|
|
|
foreach_in_list(ir_variable, param, &sig->parameters) {
|
|
|
|
|
if (param == ir->variable_referenced()) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
this->deref = nir_build_deref_cast(&b, nir_load_param(&b, i),
|
2024-07-24 13:57:53 +10:00
|
|
|
nir_var_function_temp, ir->type, 0);
|
2018-12-10 10:58:43 -08:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-10 18:18:17 -07:00
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(this->var_table, ir->var);
|
|
|
|
|
assert(entry);
|
|
|
|
|
nir_variable *var = (nir_variable *) entry->data;
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
this->deref = nir_build_deref_var(&b, var);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_dereference_record *ir)
|
|
|
|
|
{
|
|
|
|
|
ir->record->accept(this);
|
|
|
|
|
|
2017-08-09 13:34:04 +10:00
|
|
|
int field_index = ir->field_idx;
|
2014-11-25 21:36:25 -08:00
|
|
|
assert(field_index >= 0);
|
|
|
|
|
|
2021-12-28 21:11:07 +08:00
|
|
|
   /* A sparse texture result variable is a struct in GLSL IR, but it has been
|
|
|
|
|
* converted to a vector for nir_variable.
|
|
|
|
|
*/
|
|
|
|
|
if (this->deref->deref_type == nir_deref_type_var &&
|
|
|
|
|
_mesa_set_search(this->sparse_variable_set, this->deref->var)) {
|
2023-08-12 16:17:15 -04:00
|
|
|
nir_def *load = nir_load_deref(&b, this->deref);
|
2021-12-28 21:11:07 +08:00
|
|
|
assert(load->num_components >= 2);
|
|
|
|
|
|
2023-08-12 16:17:15 -04:00
|
|
|
nir_def *ssa;
|
2021-12-28 21:11:07 +08:00
|
|
|
const glsl_type *type = ir->record->type;
|
2023-12-14 22:21:26 -08:00
|
|
|
if (field_index == glsl_get_field_index(type, "code")) {
|
2021-12-28 21:11:07 +08:00
|
|
|
/* last channel holds residency code */
|
|
|
|
|
ssa = nir_channel(&b, load, load->num_components - 1);
|
|
|
|
|
} else {
|
2023-12-14 22:21:26 -08:00
|
|
|
assert(field_index == glsl_get_field_index(type, "texel"));
|
2021-12-28 21:11:07 +08:00
|
|
|
|
|
|
|
|
unsigned mask = BITFIELD_MASK(load->num_components - 1);
|
|
|
|
|
ssa = nir_channels(&b, load, mask);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* still need to create a deref for return */
|
|
|
|
|
nir_variable *tmp =
|
|
|
|
|
nir_local_variable_create(this->impl, ir->type, "deref_tmp");
|
|
|
|
|
this->deref = nir_build_deref_var(&b, tmp);
|
|
|
|
|
nir_store_deref(&b, this->deref, ssa, ~0);
|
|
|
|
|
} else
|
|
|
|
|
this->deref = nir_build_deref_struct(&b, this->deref, field_index);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_visitor::visit(ir_dereference_array *ir)
|
|
|
|
|
{
|
2023-08-12 16:17:15 -04:00
|
|
|
nir_def *index = evaluate_rvalue(ir->array_index);
|
2014-07-10 18:18:17 -07:00
|
|
|
|
|
|
|
|
ir->array->accept(this);
|
|
|
|
|
|
2018-03-15 09:58:22 -07:00
|
|
|
this->deref = nir_build_deref_array(&b, this->deref, index);
|
2014-07-10 18:18:17 -07:00
|
|
|
}
|
2014-09-07 19:24:15 +12:00
|
|
|
|
|
|
|
|
void
|
2015-08-25 18:19:12 +01:00
|
|
|
nir_visitor::visit(ir_barrier *)
|
2014-09-07 19:24:15 +12:00
|
|
|
{
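   /* GLSL barrier(): in compute shaders it synchronizes the workgroup and
    * makes shared-memory accesses visible; in tessellation control shaders it
    * orders accesses to the shader's outputs instead.
    */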
|
2023-06-06 16:43:26 -04:00
|
|
|
if (shader->info.stage == MESA_SHADER_COMPUTE) {
|
2023-07-28 15:08:00 -04:00
|
|
|
nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
|
|
|
|
|
NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
|
2023-06-06 16:43:26 -04:00
|
|
|
} else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
|
2023-07-28 15:08:00 -04:00
|
|
|
nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
|
|
|
|
|
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
|
2023-03-01 14:47:56 -05:00
|
|
|
}
|
2014-09-07 19:24:15 +12:00
|
|
|
}
|
2019-03-03 10:00:14 -06:00
|
|
|
|
|
|
|
|
nir_shader *
|
|
|
|
|
glsl_float64_funcs_to_nir(struct gl_context *ctx,
|
|
|
|
|
const nir_shader_compiler_options *options)
|
|
|
|
|
{
|
|
|
|
|
/* We pretend it's a vertex shader. Ultimately, the stage shouldn't
|
|
|
|
|
* matter because we're not optimizing anything here.
|
|
|
|
|
*/
|
|
|
|
|
struct gl_shader *sh = _mesa_new_shader(-1, MESA_SHADER_VERTEX);
|
|
|
|
|
sh->Source = float64_source;
|
|
|
|
|
sh->CompileStatus = COMPILE_FAILURE;
|
|
|
|
|
_mesa_glsl_compile_shader(ctx, sh, false, false, true);
|
|
|
|
|
|
|
|
|
|
if (!sh->CompileStatus) {
|
|
|
|
|
if (sh->InfoLog) {
|
|
|
|
|
_mesa_problem(ctx,
|
|
|
|
|
"fp64 software impl compile failed:\n%s\nsource:\n%s\n",
|
|
|
|
|
sh->InfoLog, float64_source);
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);
|
|
|
|
|
|
2022-01-07 12:47:08 +10:00
|
|
|
nir_visitor v1(&ctx->Const, nir);
|
2019-03-03 10:00:14 -06:00
|
|
|
nir_function_visitor v2(&v1);
|
|
|
|
|
v2.run(sh->ir);
|
|
|
|
|
visit_exec_list(sh->ir, &v1);
|
|
|
|
|
|
|
|
|
|
/* _mesa_delete_shader will try to free sh->Source but it's static const */
|
|
|
|
|
sh->Source = NULL;
|
|
|
|
|
_mesa_delete_shader(ctx, sh);
|
|
|
|
|
|
|
|
|
|
nir_validate_shader(nir, "float64_funcs_to_nir");
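   /* Lower local initializers and returns and inline any nested calls so
    * each fp64 helper in the library is self-contained before it gets copied
    * into shaders that need the software fp64 routines.
    */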
|
|
|
|
|
|
2024-01-10 09:57:14 -04:00
|
|
|
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_function_temp);
|
|
|
|
|
NIR_PASS(_, nir, nir_lower_returns);
|
|
|
|
|
NIR_PASS(_, nir, nir_inline_functions);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_deref);
|
2019-03-04 14:39:40 -06:00
|
|
|
|
2019-08-08 01:44:52 -07:00
|
|
|
/* Do some optimizations to clean up the shader now. By optimizing the
|
|
|
|
|
* functions in the library, we avoid having to re-do that work every
|
|
|
|
|
* time we inline a copy of a function. Reducing basic blocks also helps
|
|
|
|
|
* with compile times.
|
|
|
|
|
*/
|
2024-01-10 09:57:14 -04:00
|
|
|
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
|
|
|
|
|
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
|
|
|
|
|
NIR_PASS(_, nir, nir_copy_prop);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_dce);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_cse);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_gcm, true);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_peephole_select, 1, false, false);
|
|
|
|
|
NIR_PASS(_, nir, nir_opt_dce);
|
2019-08-08 01:44:52 -07:00
|
|
|
|
2019-03-03 10:00:14 -06:00
|
|
|
return nir;
|
|
|
|
|
}
|