glsl: remove now unused GLSL IR varying linker code

Acked-by: Emma Anholt <emma@anholt.net>
Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15731>
This commit is contained in:
Timothy Arceri 2022-05-06 10:48:33 +10:00 committed by Marge Bot
parent 7647023f3b
commit 318d8ce6fc
16 changed files with 3 additions and 5724 deletions

View file

@ -2025,7 +2025,6 @@ ir_variable::ir_variable(const struct glsl_type *type, const char *name,
this->data.explicit_component = false;
this->data.has_initializer = false;
this->data.is_implicit_initializer = false;
this->data.is_unmatched_generic_inout = false;
this->data.is_xfb = false;
this->data.is_xfb_only = false;
this->data.explicit_xfb_buffer = false;

View file

@ -763,15 +763,6 @@ public:
*/
unsigned is_implicit_initializer:1;
/**
* Is this variable a generic output or input that has not yet been matched
* up to a variable in another stage of the pipeline?
*
* This is used by the linker as scratch storage while assigning locations
* to generic inputs and outputs.
*/
unsigned is_unmatched_generic_inout:1;
/**
* Is this varying used by transform feedback?
*

View file

@ -102,12 +102,6 @@ bool do_constant_variable(exec_list *instructions);
bool do_constant_variable_unlinked(exec_list *instructions);
bool do_copy_propagation_elements(exec_list *instructions);
bool do_constant_propagation(exec_list *instructions);
void do_dead_builtin_varyings(const struct gl_constants *consts,
gl_api api,
gl_linked_shader *producer,
gl_linked_shader *consumer,
unsigned num_tfeedback_decls,
class tfeedback_decl *tfeedback_decls);
bool do_dead_code(exec_list *instructions, bool uniform_locations_assigned);
bool do_dead_code_local(exec_list *instructions);
bool do_dead_code_unlinked(exec_list *instructions);
@ -130,23 +124,11 @@ bool lower_instructions(exec_list *instructions, unsigned what_to_lower);
bool lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage, unsigned max_uniform_components);
bool lower_clip_cull_distance(struct gl_shader_program *prog,
gl_linked_shader *shader);
ir_variable * lower_xfb_varying(void *mem_ctx,
gl_linked_shader *shader,
const char *old_var_name);
void lower_output_reads(unsigned stage, exec_list *instructions);
bool lower_packing_builtins(exec_list *instructions, int op_mask);
void lower_shared_reference(const struct gl_constants *consts,
struct gl_shader_program *prog,
struct gl_linked_shader *shader);
void lower_packed_varyings(void *mem_ctx,
unsigned locations_used,
const uint8_t *components,
ir_variable_mode mode,
unsigned gs_input_vertices,
gl_linked_shader *shader,
bool disable_varying_packing,
bool disable_xfb_packing,
bool xfb_enabled);
bool lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index);
bool lower_vector_derefs(gl_linked_shader *shader);
void lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader);

File diff suppressed because it is too large Load diff

View file

@ -31,268 +31,7 @@
* stages.
*/
#include "program/prog_parameter.h"
#include "util/bitset.h"
struct gl_shader_program;
struct gl_shader;
class ir_variable;
/**
 * Data structure describing a varying which is available for use in transform
 * feedback.
 *
 * For example, if the vertex shader contains:
 *
 *     struct S {
 *       vec4 foo;
 *       float[3] bar;
 *     };
 *
 *     varying S[2] v;
 *
 * Then there would be tfeedback_candidate objects corresponding to the
 * following varyings:
 *
 *     v[0].foo
 *     v[0].bar
 *     v[1].foo
 *     v[1].bar
 */
struct tfeedback_candidate
{
   /**
    * Toplevel variable containing this varying. In the above example, this
    * would point to the declaration of the varying v.
    */
   ir_variable *toplevel_var;

   /**
    * Type of this varying. In the above example, this would point to the
    * glsl_type for "vec4" or "float[3]".
    */
   const glsl_type *type;

   /**
    * Offset within the toplevel variable where this varying occurs.
    * Counted in floats.
    */
   unsigned struct_offset_floats;

   /**
    * Offset within the xfb (transform feedback buffer) with respect to
    * alignment requirements.  Counted in floats.
    */
   unsigned xfb_offset_floats;
};
/**
 * Data structure tracking information about a transform feedback declaration
 * during linking.
 *
 * One instance corresponds to one string passed to
 * glTransformFeedbackVaryings() (or one xfb_* qualifier declaration),
 * including the special gl_NextBuffer / gl_SkipComponents entries from
 * ARB_transform_feedback3.
 */
class tfeedback_decl
{
public:
   void init(const struct gl_constants *consts,
             const struct gl_extensions *exts,
             const void *mem_ctx, const char *input);
   static bool is_same(const tfeedback_decl &x, const tfeedback_decl &y);
   bool assign_location(const struct gl_constants *consts,
                        struct gl_shader_program *prog);
   unsigned get_num_outputs() const;
   bool store(const struct gl_constants *consts,
              struct gl_shader_program *prog,
              struct gl_transform_feedback_info *info, unsigned buffer,
              unsigned buffer_index, const unsigned max_outputs,
              BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS],
              bool *explicit_stride, unsigned *max_member_alignment,
              bool has_xfb_qualifiers, const void *mem_ctx) const;
   const tfeedback_candidate *find_candidate(gl_shader_program *prog,
                                             hash_table *tfeedback_candidates);
   void set_lowered_candidate(const tfeedback_candidate *candidate);

   /** True if this entry is the gl_NextBuffer separator, not a varying. */
   bool is_next_buffer_separator() const
   {
      return this->next_buffer_separator;
   }

   /* NOTE(review): dereferences matched_candidate, so this is only valid
    * after a successful find_candidate()/set_lowered_candidate() call.
    */
   bool is_varying_written() const
   {
      if (this->next_buffer_separator || this->skip_components)
         return false;

      return this->matched_candidate->toplevel_var->data.assigned;
   }

   /** True if this entry names a real varying (not a separator/skip). */
   bool is_varying() const
   {
      return !this->next_buffer_separator && !this->skip_components;
   }

   bool subscripted() const
   {
      return this->is_subscripted;
   }

   const char *name() const
   {
      return this->orig_name;
   }

   unsigned get_stream_id() const
   {
      return this->stream_id;
   }

   unsigned get_buffer() const
   {
      return this->buffer;
   }

   unsigned get_offset() const
   {
      return this->offset;
   }

   /**
    * The total number of varying components taken up by this variable.  Only
    * valid if assign_location() has been called.
    */
   unsigned num_components() const
   {
      if (this->lowered_builtin_array_variable)
         return this->size;
      else
         /* 64-bit types (e.g. double) occupy two float components each. */
         return this->vector_elements * this->matrix_columns * this->size *
            (this->is_64bit() ? 2 : 1);
   }

   unsigned get_location() const {
      return this->location;
   }

private:
   bool is_64bit() const
   {
      return _mesa_gl_datatype_is_64bit(this->type);
   }

   /**
    * The name that was supplied to glTransformFeedbackVaryings.  Used for
    * error reporting and glGetTransformFeedbackVarying().
    */
   const char *orig_name;

   /**
    * The name of the variable, parsed from orig_name.
    */
   const char *var_name;

   /**
    * True if the declaration in orig_name represents an array.
    */
   bool is_subscripted;

   /**
    * If is_subscripted is true, the subscript that was specified in orig_name.
    */
   unsigned array_subscript;

   /**
    * Non-zero if the variable is gl_ClipDistance, gl_CullDistance,
    * gl_TessLevelOuter or gl_TessLevelInner and the driver lowers it to
    * gl_*MESA.
    */
   enum {
      none,
      clip_distance,
      cull_distance,
      tess_level_outer,
      tess_level_inner,
   } lowered_builtin_array_variable;

   /**
    * The vertex shader output location that the linker assigned for this
    * variable.  -1 if a location hasn't been assigned yet.
    */
   int location;

   /**
    * Used to store the buffer assigned by xfb_buffer.
    */
   unsigned buffer;

   /**
    * Used to store the offset assigned by xfb_offset.
    */
   unsigned offset;

   /**
    * If non-zero, then this variable may be packed along with other variables
    * into a single varying slot, so this offset should be applied when
    * accessing components.  For example, an offset of 1 means that the x
    * component of this variable is actually stored in component y of the
    * location specified by \c location.
    *
    * Only valid if location != -1.
    */
   unsigned location_frac;

   /**
    * If location != -1, the number of vector elements in this variable, or 1
    * if this variable is a scalar.
    */
   unsigned vector_elements;

   /**
    * If location != -1, the number of matrix columns in this variable, or 1
    * if this variable is not a matrix.
    */
   unsigned matrix_columns;

   /** Type of the varying returned by glGetTransformFeedbackVarying() */
   GLenum type;

   /**
    * If location != -1, the size that should be returned by
    * glGetTransformFeedbackVarying().
    */
   unsigned size;

   /**
    * How many components to skip.  If non-zero, this is
    * gl_SkipComponents{1,2,3,4} from ARB_transform_feedback3.
    */
   unsigned skip_components;

   /**
    * Whether this is gl_NextBuffer from ARB_transform_feedback3.
    */
   bool next_buffer_separator;

   /**
    * If find_candidate() has been called, pointer to the tfeedback_candidate
    * data structure that was found.  Otherwise NULL.
    */
   const tfeedback_candidate *matched_candidate;

   /**
    * StreamId assigned to this varying (defaults to 0).  Can only be set to
    * values other than 0 in geometry shaders that use the stream layout
    * modifier.  Accepted values must be in the range [0, MAX_VERTEX_STREAMS-1].
    */
   unsigned stream_id;
};
bool
link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
const struct gl_constants *consts,
const struct gl_extensions *exts,
gl_api api, void *mem_ctx);
void
validate_first_and_last_interface_explicit_locations(const struct gl_constants *consts,

View file

@ -530,16 +530,6 @@ link_invalidate_variable_locations(exec_list *ir)
var->data.location = -1;
var->data.location_frac = 0;
}
/* ir_variable::is_unmatched_generic_inout is used by the linker while
* connecting outputs from one stage to inputs of the next stage.
*/
if (var->data.explicit_location &&
var->data.location < VARYING_SLOT_VAR0) {
var->data.is_unmatched_generic_inout = 0;
} else {
var->data.is_unmatched_generic_inout = 1;
}
}
}
@ -2796,7 +2786,6 @@ assign_attribute_or_color_locations(void *mem_ctx,
continue;
if (var->data.explicit_location) {
var->data.is_unmatched_generic_inout = 0;
if ((var->data.location >= (int)(max_index + generic_base))
|| (var->data.location < 0)) {
linker_error(prog,
@ -2813,7 +2802,6 @@ assign_attribute_or_color_locations(void *mem_ctx,
if (prog->AttributeBindings->get(binding, var->name)) {
assert(binding >= VERT_ATTRIB_GENERIC0);
var->data.location = binding;
var->data.is_unmatched_generic_inout = 0;
}
} else if (target_index == MESA_SHADER_FRAGMENT) {
unsigned binding;
@ -2826,7 +2814,6 @@ assign_attribute_or_color_locations(void *mem_ctx,
if (prog->FragDataBindings->get(binding, name)) {
assert(binding >= FRAG_RESULT_DATA0);
var->data.location = binding;
var->data.is_unmatched_generic_inout = 0;
if (prog->FragDataIndexBindings->get(index, name)) {
var->data.index = index;
@ -3134,7 +3121,6 @@ assign_attribute_or_color_locations(void *mem_ctx,
}
to_assign[i].var->data.location = generic_base + location;
to_assign[i].var->data.is_unmatched_generic_inout = 0;
used_locations |= (use_mask << location);
if (to_assign[i].var->type->without_array()->is_dual_slot())
@ -3161,61 +3147,6 @@ assign_attribute_or_color_locations(void *mem_ctx,
return true;
}
/**
 * Match explicitly-located outputs of \p producer to explicitly-located
 * inputs of \p consumer, and clear the is_unmatched_generic_inout flag on
 * both sides of each match so the variables are not optimised away.
 */
static void
match_explicit_outputs_to_inputs(gl_linked_shader *producer,
                                 gl_linked_shader *consumer)
{
   /* For each generic slot/component, the first producer output explicitly
    * assigned to it.  Zero-initialized; untouched entries stay NULL.
    */
   ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
      { {NULL, NULL} };

   /* Find all shader outputs in the "producer" stage. */
   foreach_in_list(ir_instruction, node, producer->ir) {
      ir_variable *const var = node->as_variable();

      if ((var == NULL) || (var->data.mode != ir_var_shader_out))
         continue;

      if (var->data.explicit_location &&
          var->data.location >= VARYING_SLOT_VAR0) {
         const unsigned idx = var->data.location - VARYING_SLOT_VAR0;
         if (explicit_locations[idx][var->data.location_frac] == NULL)
            explicit_locations[idx][var->data.location_frac] = var;

         /* Always match TCS outputs. They are shared by all invocations
          * within a patch and can be used as shared memory.
          */
         if (producer->Stage == MESA_SHADER_TESS_CTRL)
            var->data.is_unmatched_generic_inout = 0;
      }
   }

   /* Match inputs to outputs. */
   foreach_in_list(ir_instruction, node, consumer->ir) {
      ir_variable *const input = node->as_variable();

      if ((input == NULL) || (input->data.mode != ir_var_shader_in))
         continue;

      if (input->data.explicit_location &&
          input->data.location >= VARYING_SLOT_VAR0) {
         ir_variable *output =
            explicit_locations[input->data.location - VARYING_SLOT_VAR0]
                              [input->data.location_frac];

         if (output != NULL) {
            input->data.is_unmatched_generic_inout = 0;
            output->data.is_unmatched_generic_inout = 0;
         }
      }
   }
}
/**
* Store the gl_FragDepth layout in the gl_shader_program struct.
*/
@ -3452,409 +3383,6 @@ check_explicit_uniform_locations(const struct gl_extensions *exts,
prog->NumExplicitUniformLocations = entries_total;
}
/**
 * Check whether \p var is a packed varying and whether \p name is one of the
 * variables merged into it.
 *
 * A packed varying has a name of the form 'packed:a,b,c' where a, b and c
 * are the names of the separate variables that were packed together.
 *
 * \return true if \p name appears in var's packed-varying list.
 */
static bool
included_in_packed_varying(ir_variable *var, const char *name)
{
   /* Packed varyings are recognized by the "packed:" name prefix. */
   if (strncmp(var->name, "packed:", 7) != 0)
      return false;

   /* strtok_r modifies the string it scans, so work on a heap copy. */
   char *list = strdup(var->name + 7);
   if (!list) {
      /* Allocation failure: report "not found" rather than dereferencing
       * NULL (the previous assert() compiled away under NDEBUG).
       */
      return false;
   }

   bool found = false;
   char *saveptr;
   for (char *token = strtok_r(list, ",", &saveptr); token != NULL;
        token = strtok_r(NULL, ",", &saveptr)) {
      if (strcmp(token, name) == 0) {
         found = true;
         break;
      }
   }
   free(list);
   return found;
}
/**
 * Build a stage reference bitmask for the variable named \p name: bit i is
 * set if shader stage i declares (or packs) that variable with mode \p mode.
 */
static uint8_t
build_stageref(struct gl_shader_program *shProg, const char *name,
               unsigned mode)
{
   uint8_t stages = 0;

   /* Note, that we assume MAX 8 stages, if there will be more stages, type
    * used for reference mask in gl_program_resource will need to be changed.
    */
   assert(MESA_SHADER_STAGES < 8);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
      if (!sh)
         continue;

      /* Shader symbol table may contain variables that have
       * been optimized away. Search IR for the variable instead.
       */
      foreach_in_list(ir_instruction, node, sh->ir) {
         ir_variable *var = node->as_variable();
         if (var) {
            unsigned baselen = strlen(var->name);

            /* The variable may survive only inside a packed varying;
             * check its component list before comparing names directly.
             */
            if (included_in_packed_varying(var, name)) {
               stages |= (1 << i);
               break;
            }

            /* Type needs to match if specified, otherwise we might
             * pick a variable with same name but different interface.
             */
            if (var->data.mode != mode)
               continue;

            /* Prefix match: var->name must be a prefix of name... */
            if (strncmp(var->name, name, baselen) == 0) {
               /* Check for exact name matches but also check for arrays and
                * structs.
                */
               if (name[baselen] == '\0' ||
                   name[baselen] == '[' ||
                   name[baselen] == '.') {
                  stages |= (1 << i);
                  break;
               }
            }
         }
      }
   }
   return stages;
}
/**
 * Create a gl_shader_variable (program resource list entry) from an
 * ir_variable.
 *
 * \param shProg    ralloc context and owner of the new variable.
 * \param in        the IR variable being exposed.
 * \param name      resource name to report (may differ from in->name).
 * \param type      reported type (may be overridden for lowered built-ins).
 * \param use_implicit_location  whether an implicitly assigned location is
 *                  acceptable (VS inputs / FS outputs per the spec).
 * \return the new variable, or NULL on allocation failure.
 */
static gl_shader_variable *
create_shader_variable(struct gl_shader_program *shProg,
                       const ir_variable *in,
                       const char *name, const glsl_type *type,
                       const glsl_type *interface_type,
                       bool use_implicit_location, int location,
                       const glsl_type *outermost_struct_type)
{
   /* Allocate zero-initialized memory to ensure that bitfield padding
    * is zero.
    */
   gl_shader_variable *out = rzalloc(shProg, struct gl_shader_variable);
   if (!out)
      return NULL;

   /* Since gl_VertexID may be lowered to gl_VertexIDMESA, but applications
    * expect to see gl_VertexID in the program resource list. Pretend.
    */
   if (in->data.mode == ir_var_system_value &&
       in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
      out->name.string = ralloc_strdup(shProg, "gl_VertexID");
   } else if ((in->data.mode == ir_var_shader_out &&
               in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
              (in->data.mode == ir_var_system_value &&
               in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
      /* Lowered tess levels are reported under their built-in name with
       * their canonical float-array type.
       */
      out->name.string = ralloc_strdup(shProg, "gl_TessLevelOuter");
      type = glsl_type::get_array_instance(glsl_type::float_type, 4);
   } else if ((in->data.mode == ir_var_shader_out &&
               in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
              (in->data.mode == ir_var_system_value &&
               in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
      out->name.string = ralloc_strdup(shProg, "gl_TessLevelInner");
      type = glsl_type::get_array_instance(glsl_type::float_type, 2);
   } else {
      out->name.string = ralloc_strdup(shProg, name);
   }

   resource_name_updated(&out->name);

   if (!out->name.string)
      return NULL;

   /* The ARB_program_interface_query spec says:
    *
    *     "Not all active variables are assigned valid locations; the
    *     following variables will have an effective location of -1:
    *
    *     * uniforms declared as atomic counters;
    *
    *     * members of a uniform block;
    *
    *     * built-in inputs, outputs, and uniforms (starting with "gl_"); and
    *
    *     * inputs or outputs not declared with a "location" layout
    *       qualifier, except for vertex shader inputs and fragment shader
    *       outputs."
    */
   if (in->type->is_atomic_uint() || is_gl_identifier(in->name) ||
       !(in->data.explicit_location || use_implicit_location)) {
      out->location = -1;
   } else {
      out->location = location;
   }

   out->type = type;
   out->outermost_struct_type = outermost_struct_type;
   out->interface_type = interface_type;
   out->component = in->data.location_frac;
   out->index = in->data.index;
   out->patch = in->data.patch;
   out->mode = in->data.mode;
   out->interpolation = in->data.interpolation;
   out->explicit_location = in->data.explicit_location;
   out->precision = in->data.precision;

   return out;
}
/**
 * Add \p var (or, recursively, its members/elements) to the program
 * resource list for interface \p programInterface.
 *
 * Structs and arrays-of-aggregates are expanded into one resource entry per
 * member/element, per the ARB_program_interface_query enumeration rules.
 *
 * \return false on allocation failure, true otherwise.
 */
static bool
add_shader_variable(struct gl_shader_program *shProg,
                    struct set *resource_set,
                    unsigned stage_mask,
                    GLenum programInterface, ir_variable *var,
                    const char *name, const glsl_type *type,
                    bool use_implicit_location, int location,
                    bool inouts_share_location,
                    const glsl_type *outermost_struct_type = NULL)
{
   const glsl_type *interface_type = var->get_interface_type();

   if (outermost_struct_type == NULL) {
      if (var->data.from_named_ifc_block) {
         const char *interface_name = interface_type->name;

         if (interface_type->is_array()) {
            /* Issue #16 of the ARB_program_interface_query spec says:
             *
             * "* If a variable is a member of an interface block without an
             *    instance name, it is enumerated using just the variable name.
             *
             *  * If a variable is a member of an interface block with an
             *    instance name, it is enumerated as "BlockName.Member", where
             *    "BlockName" is the name of the interface block (not the
             *    instance name) and "Member" is the name of the variable."
             *
             * In particular, it indicates that it should be "BlockName",
             * not "BlockName[array length]".  The conformance suite and
             * dEQP both require this behavior.
             *
             * Here, we unwrap the extra array level added by named interface
             * block array lowering so we have the correct variable type.  We
             * also unwrap the interface type when constructing the name.
             *
             * We leave interface_type the same so that ES 3.x SSO pipeline
             * validation can enforce the rules requiring array length to
             * match on interface blocks.
             */
            type = type->fields.array;

            interface_name = interface_type->fields.array->name;
         }

         name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
      }
   }

   switch (type->base_type) {
   case GLSL_TYPE_STRUCT: {
      /* The ARB_program_interface_query spec says:
       *
       *     "For an active variable declared as a structure, a separate entry
       *     will be generated for each active structure member.  The name of
       *     each entry is formed by concatenating the name of the structure,
       *     the "."  character, and the name of the structure member.  If a
       *     structure member to enumerate is itself a structure or array,
       *     these enumeration rules are applied recursively."
       */
      if (outermost_struct_type == NULL)
         outermost_struct_type = type;

      unsigned field_location = location;
      for (unsigned i = 0; i < type->length; i++) {
         const struct glsl_struct_field *field = &type->fields.structure[i];
         char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
         if (!add_shader_variable(shProg, resource_set,
                                  stage_mask, programInterface,
                                  var, field_name, field->type,
                                  use_implicit_location, field_location,
                                  false, outermost_struct_type))
            return false;

         /* Members are laid out consecutively with no gaps. */
         field_location += field->type->count_attribute_slots(false);
      }
      return true;
   }

   case GLSL_TYPE_ARRAY: {
      /* The ARB_program_interface_query spec says:
       *
       *     "For an active variable declared as an array of basic types, a
       *      single entry will be generated, with its name string formed by
       *      concatenating the name of the array and the string "[0]"."
       *
       *     "For an active variable declared as an array of an aggregate data
       *      type (structures or arrays), a separate entry will be generated
       *      for each active array element, unless noted immediately below.
       *      The name of each entry is formed by concatenating the name of
       *      the array, the "[" character, an integer identifying the element
       *      number, and the "]" character.  These enumeration rules are
       *      applied recursively, treating each enumerated array element as a
       *      separate active variable."
       */
      const struct glsl_type *array_type = type->fields.array;
      if (array_type->base_type == GLSL_TYPE_STRUCT ||
          array_type->base_type == GLSL_TYPE_ARRAY) {
         unsigned elem_location = location;
         /* Arrayed per-vertex/per-patch slots share one location per
          * element, hence a stride of zero.
          */
         unsigned stride = inouts_share_location ? 0 :
                           array_type->count_attribute_slots(false);
         for (unsigned i = 0; i < type->length; i++) {
            char *elem = ralloc_asprintf(shProg, "%s[%d]", name, i);
            if (!add_shader_variable(shProg, resource_set,
                                     stage_mask, programInterface,
                                     var, elem, array_type,
                                     use_implicit_location, elem_location,
                                     false, outermost_struct_type))
               return false;
            elem_location += stride;
         }
         return true;
      }
      /* Arrays of basic types produce a single "name[0]"-style entry;
       * handle them in the default case below.
       */
      FALLTHROUGH;
   }

   default: {
      /* The ARB_program_interface_query spec says:
       *
       *     "For an active variable declared as a single instance of a basic
       *     type, a single entry will be generated, using the variable name
       *     from the shader source."
       */
      gl_shader_variable *sha_v =
         create_shader_variable(shProg, var, name, type, interface_type,
                                use_implicit_location, location,
                                outermost_struct_type);
      if (!sha_v)
         return false;

      return link_util_add_program_resource(shProg, resource_set,
                                            programInterface, sha_v, stage_mask);
   }
   }
}
/**
 * Whether all elements of an arrayed per-vertex input/output share a single
 * location: non-patch TCS outputs, and non-patch TCS/TES/GS inputs.
 */
static bool
inout_has_same_location(const ir_variable *var, unsigned stage)
{
   /* Per-patch variables never share a location across vertices. */
   if (var->data.patch)
      return false;

   const bool tcs_output =
      var->data.mode == ir_var_shader_out && stage == MESA_SHADER_TESS_CTRL;
   const bool arrayed_input =
      var->data.mode == ir_var_shader_in &&
      (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
       stage == MESA_SHADER_GEOMETRY);

   return tcs_output || arrayed_input;
}
/**
 * Add the packed varyings of the given stage that match interface \p type
 * (GL_PROGRAM_INPUT or GL_PROGRAM_OUTPUT) to the program resource list.
 *
 * \return false on allocation failure, true otherwise.
 */
static bool
add_packed_varyings(struct gl_shader_program *shProg,
                    struct set *resource_set,
                    int stage, GLenum type)
{
   struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];

   /* Nothing to do when the stage is absent or has no packed varyings. */
   if (!sh || !sh->packed_varyings)
      return true;

   foreach_in_list(ir_instruction, node, sh->packed_varyings) {
      ir_variable *var = node->as_variable();
      if (!var)
         continue;

      GLenum iface;
      switch (var->data.mode) {
      case ir_var_shader_in:
         iface = GL_PROGRAM_INPUT;
         break;
      case ir_var_shader_out:
         iface = GL_PROGRAM_OUTPUT;
         break;
      default:
         unreachable("unexpected type");
      }

      if (type != iface)
         continue;

      const int stage_mask =
         build_stageref(shProg, var->name, var->data.mode);
      if (!add_shader_variable(shProg, resource_set,
                               stage_mask,
                               iface, var, var->name, var->type, false,
                               var->data.location - VARYING_SLOT_VAR0,
                               inout_has_same_location(var, stage)))
         return false;
   }
   return true;
}
/**
 * Builds up a list of program resources that point to existing
 * resource data.
 *
 * \param consts  currently unread in this path -- kept for interface
 *                compatibility with callers (TODO confirm).
 */
void
build_program_resource_list(const struct gl_constants *consts,
                            struct gl_shader_program *shProg)
{
   /* Rebuild resource list. */
   if (shProg->data->ProgramResourceList) {
      ralloc_free(shProg->data->ProgramResourceList);
      shProg->data->ProgramResourceList = NULL;
      shProg->data->NumProgramResourceList = 0;
   }

   int input_stage = MESA_SHADER_STAGES, output_stage = 0;

   /* Determine first input and final output stage. These are used to
    * detect which variables should be enumerated in the resource list
    * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
    */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!shProg->_LinkedShaders[i])
         continue;
      if (input_stage == MESA_SHADER_STAGES)
         input_stage = i;
      output_stage = i;
   }

   /* Empty shader, no resources. */
   if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
      return;

   struct set *resource_set = _mesa_pointer_set_create(NULL);

   /* Program interface needs to expose varyings in case of SSO.
    *
    * On failure we still destroy resource_set before returning; the
    * previous early returns leaked it.
    */
   if (shProg->SeparateShader) {
      if (!add_packed_varyings(shProg, resource_set,
                               input_stage, GL_PROGRAM_INPUT) ||
          !add_packed_varyings(shProg, resource_set,
                               output_stage, GL_PROGRAM_OUTPUT)) {
         _mesa_set_destroy(resource_set, NULL);
         return;
      }
   }

   _mesa_set_destroy(resource_set, NULL);
}
/**
* This check is done to make sure we allow only constant expression
* indexing and "constant-index-expression" (indexing with an expression
@ -4071,16 +3599,6 @@ link_varyings_and_uniforms(unsigned first, unsigned last,
}
}
unsigned prev = first;
for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
if (prog->_LinkedShaders[i] == NULL)
continue;
match_explicit_outputs_to_inputs(prog->_LinkedShaders[prev],
prog->_LinkedShaders[i]);
prev = i;
}
if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
MESA_SHADER_VERTEX, true)) {
return false;
@ -4100,9 +3618,6 @@ link_varyings_and_uniforms(unsigned first, unsigned last,
break;
}
if (!prog->data->LinkStatus)
return false;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (prog->_LinkedShaders[i] == NULL)
continue;

View file

@ -35,9 +35,6 @@ extern bool
link_function_calls(gl_shader_program *prog, gl_linked_shader *main,
gl_shader **shader_list, unsigned num_shaders);
extern void
link_invalidate_variable_locations(exec_list *ir);
extern int
link_cross_validate_uniform_block(void *mem_ctx,
struct gl_uniform_block **linked_blocks,

View file

@ -1,980 +0,0 @@
/*
* Copyright © 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* \file lower_varyings_to_packed.cpp
*
* This lowering pass generates GLSL code that manually packs varyings into
* vec4 slots, for the benefit of back-ends that don't support packed varyings
* natively.
*
* For example, the following shader:
*
* out mat3x2 foo; // location=4, location_frac=0
* out vec3 bar[2]; // location=5, location_frac=2
*
* main()
* {
* ...
* }
*
* Is rewritten to:
*
* mat3x2 foo;
* vec3 bar[2];
* out vec4 packed4; // location=4, location_frac=0
* out vec4 packed5; // location=5, location_frac=0
* out vec4 packed6; // location=6, location_frac=0
*
* main()
* {
* ...
* packed4.xy = foo[0];
* packed4.zw = foo[1];
* packed5.xy = foo[2];
* packed5.zw = bar[0].xy;
* packed6.x = bar[0].z;
* packed6.yzw = bar[1];
* }
*
* This lowering pass properly handles "double parking" of a varying vector
* across two varying slots. For example, in the code above, two of the
* components of bar[0] are stored in packed5, and the remaining component is
* stored in packed6.
*
* Note that in theory, the extra instructions may cause some loss of
* performance. However, hopefully in most cases the performance loss will
* either be absorbed by a later optimization pass, or it will be offset by
* memory bandwidth savings (because fewer varyings are used).
*
* This lowering pass also packs flat floats, ints, and uints together, by
* using ivec4 as the base type of flat "varyings", and using appropriate
* casts to convert floats and uints into ints.
*
* This lowering pass also handles varyings whose type is a struct or an array
* of struct. Structs are packed in order and with no gaps, so there may be a
* performance penalty due to structure elements being double-parked.
*
* Lowering of geometry shader inputs is slightly more complex, since geometry
* inputs are always arrays, so we need to lower arrays to arrays. For
* example, the following input:
*
* in struct Foo {
* float f;
* vec3 v;
* vec2 a[2];
* } arr[3]; // location=4, location_frac=0
*
* Would get lowered like this if it occurred in a fragment shader:
*
* struct Foo {
* float f;
* vec3 v;
* vec2 a[2];
* } arr[3];
* in vec4 packed4; // location=4, location_frac=0
* in vec4 packed5; // location=5, location_frac=0
* in vec4 packed6; // location=6, location_frac=0
* in vec4 packed7; // location=7, location_frac=0
* in vec4 packed8; // location=8, location_frac=0
* in vec4 packed9; // location=9, location_frac=0
*
* main()
* {
* arr[0].f = packed4.x;
* arr[0].v = packed4.yzw;
* arr[0].a[0] = packed5.xy;
* arr[0].a[1] = packed5.zw;
* arr[1].f = packed6.x;
* arr[1].v = packed6.yzw;
* arr[1].a[0] = packed7.xy;
* arr[1].a[1] = packed7.zw;
* arr[2].f = packed8.x;
* arr[2].v = packed8.yzw;
* arr[2].a[0] = packed9.xy;
* arr[2].a[1] = packed9.zw;
* ...
* }
*
* But it would get lowered like this if it occurred in a geometry shader:
*
* struct Foo {
* float f;
* vec3 v;
* vec2 a[2];
* } arr[3];
* in vec4 packed4[3]; // location=4, location_frac=0
* in vec4 packed5[3]; // location=5, location_frac=0
*
* main()
* {
* arr[0].f = packed4[0].x;
* arr[0].v = packed4[0].yzw;
* arr[0].a[0] = packed5[0].xy;
* arr[0].a[1] = packed5[0].zw;
* arr[1].f = packed4[1].x;
* arr[1].v = packed4[1].yzw;
* arr[1].a[0] = packed5[1].xy;
* arr[1].a[1] = packed5[1].zw;
* arr[2].f = packed4[2].x;
* arr[2].v = packed4[2].yzw;
* arr[2].a[0] = packed5[2].xy;
* arr[2].a[1] = packed5[2].zw;
* ...
* }
*/
#include "glsl_symbol_table.h"
#include "ir.h"
#include "ir_builder.h"
#include "ir_optimization.h"
#include "program/prog_instruction.h"
#include "main/shader_types.h"
/**
 * Return the per-vertex type of \p var: for arrayed shader interfaces
 * (non-patch TCS outputs and non-patch TCS/TES/GS inputs) the outer
 * per-vertex array level is stripped off; otherwise the declared type is
 * returned unchanged.
 */
static const glsl_type *
get_varying_type(const ir_variable *var, gl_shader_stage stage)
{
   /* Per-patch variables are not per-vertex arrays. */
   if (var->data.patch)
      return var->type;

   const bool arrayed_output =
      var->data.mode == ir_var_shader_out && stage == MESA_SHADER_TESS_CTRL;
   const bool arrayed_input =
      var->data.mode == ir_var_shader_in &&
      (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
       stage == MESA_SHADER_GEOMETRY);

   if (!arrayed_output && !arrayed_input)
      return var->type;

   assert(var->type->is_array());
   return var->type->fields.array;
}
using namespace ir_builder;
namespace {

/**
 * Visitor that performs varying packing.  For each varying declared in the
 * shader, this visitor determines whether it needs to be packed.  If so, it
 * demotes it to an ordinary global, creates new packed varyings, and
 * generates assignments to convert between the original varying and the
 * packed varying.
 */
class lower_packed_varyings_visitor
{
public:
   lower_packed_varyings_visitor(void *mem_ctx,
                                 unsigned locations_used,
                                 const uint8_t *components,
                                 ir_variable_mode mode,
                                 unsigned gs_input_vertices,
                                 exec_list *out_instructions,
                                 exec_list *out_variables,
                                 bool disable_varying_packing,
                                 bool disable_xfb_packing,
                                 bool xfb_enabled);

   void run(struct gl_linked_shader *shader);

private:
   /* Emit the (un)packing assignments for one pack/unpack conversion. */
   void bitwise_assign_pack(ir_rvalue *lhs, ir_rvalue *rhs);
   void bitwise_assign_unpack(ir_rvalue *lhs, ir_rvalue *rhs);
   unsigned lower_rvalue(ir_rvalue *rvalue, unsigned fine_location,
                         ir_variable *unpacked_var, const char *name,
                         bool gs_input_toplevel, unsigned vertex_index);
   unsigned lower_arraylike(ir_rvalue *rvalue, unsigned array_size,
                            unsigned fine_location,
                            ir_variable *unpacked_var, const char *name,
                            bool gs_input_toplevel, unsigned vertex_index);
   ir_dereference *get_packed_varying_deref(unsigned location,
                                            ir_variable *unpacked_var,
                                            const char *name,
                                            unsigned vertex_index);
   bool needs_lowering(ir_variable *var, gl_shader_stage stage);

   /**
    * Memory context used to allocate new instructions for the shader.
    */
   void * const mem_ctx;

   /**
    * Number of generic varying slots which are used by this shader.  This is
    * used to allocate temporary intermediate data structures.  If any varying
    * used by this shader has a location greater than or equal to
    * VARYING_SLOT_VAR0 + locations_used, an assertion will fire.
    */
   const unsigned locations_used;

   /* Per-slot component counts; indexed like packed_varyings (not owned). */
   const uint8_t* components;

   /**
    * Array of pointers to the packed varyings that have been created for each
    * generic varying slot.  NULL entries in this array indicate varying slots
    * for which a packed varying has not been created yet.
    */
   ir_variable **packed_varyings;

   /**
    * Type of varying which is being lowered in this pass (either
    * ir_var_shader_in or ir_var_shader_out).
    */
   const ir_variable_mode mode;

   /**
    * If we are currently lowering geometry shader inputs, the number of input
    * vertices the geometry shader accepts.  Otherwise zero.
    */
   const unsigned gs_input_vertices;

   /**
    * Exec list into which the visitor should insert the packing instructions.
    * Caller provides this list; it should insert the instructions into the
    * appropriate place in the shader once the visitor has finished running.
    */
   exec_list *out_instructions;

   /**
    * Exec list into which the visitor should insert any new variables.
    */
   exec_list *out_variables;

   /* Pass-behavior flags; see the matching constructor parameters. */
   bool disable_varying_packing;
   bool disable_xfb_packing;
   bool xfb_enabled;
};

} /* anonymous namespace */
/* Constructor: stores the caller's configuration and zero-allocates the
 * per-slot packed_varyings table (rzalloc => all entries start NULL).
 */
lower_packed_varyings_visitor::lower_packed_varyings_visitor(
      void *mem_ctx, unsigned locations_used, const uint8_t *components,
      ir_variable_mode mode,
      unsigned gs_input_vertices, exec_list *out_instructions,
      exec_list *out_variables, bool disable_varying_packing,
      bool disable_xfb_packing, bool xfb_enabled)
   : mem_ctx(mem_ctx),
     locations_used(locations_used),
     components(components),
     packed_varyings((ir_variable **)
                     rzalloc_array_size(mem_ctx, sizeof(*packed_varyings),
                                        locations_used)),
     mode(mode),
     gs_input_vertices(gs_input_vertices),
     out_instructions(out_instructions),
     out_variables(out_variables),
     disable_varying_packing(disable_varying_packing),
     disable_xfb_packing(disable_xfb_packing),
     xfb_enabled(xfb_enabled)
{
}
void
lower_packed_varyings_visitor::run(struct gl_linked_shader *shader)
{
foreach_in_list(ir_instruction, node, shader->ir) {
ir_variable *var = node->as_variable();
if (var == NULL)
continue;
if (var->data.mode != this->mode ||
var->data.location < VARYING_SLOT_VAR0 ||
!this->needs_lowering(var, shader->Stage))
continue;
/* This lowering pass is only capable of packing floats and ints
* together when their interpolation mode is "flat". Treat integers as
* being flat when the interpolation mode is none.
*/
assert(var->data.interpolation == INTERP_MODE_FLAT ||
var->data.interpolation == INTERP_MODE_NONE ||
!var->type->contains_integer());
/* Clone the variable for program resource list before
* it gets modified and lost.
*/
if (!shader->packed_varyings)
shader->packed_varyings = new (shader) exec_list;
shader->packed_varyings->push_tail(var->clone(shader, NULL));
/* Change the old varying into an ordinary global. */
assert(var->data.mode != ir_var_temporary);
var->data.mode = ir_var_auto;
/* Create a reference to the old varying. */
ir_dereference_variable *deref
= new(this->mem_ctx) ir_dereference_variable(var);
/* Recursively pack or unpack it. */
this->lower_rvalue(deref, var->data.location * 4 + var->data.location_frac, var,
var->name, this->gs_input_vertices != 0, 0);
}
}
#define SWIZZLE_ZWZW MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W)
/**
 * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
 * bitcasts if necessary to match up types.
 *
 * This function is called when packing varyings.
 */
void
lower_packed_varyings_visitor::bitwise_assign_pack(ir_rvalue *lhs,
                                                   ir_rvalue *rhs)
{
   if (lhs->type->base_type != rhs->type->base_type) {
      /* Since we only mix types in flat varyings, and we always store flat
       * varyings as type ivec4, we need only produce conversions from (uint
       * or float) to int.
       */
      assert(lhs->type->base_type == GLSL_TYPE_INT);
      switch (rhs->type->base_type) {
      case GLSL_TYPE_UINT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_u2i, lhs->type, rhs);
         break;
      case GLSL_TYPE_FLOAT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_bitcast_f2i, lhs->type, rhs);
         break;
      case GLSL_TYPE_DOUBLE:
         /* Each 64-bit component unpacks to two 32-bit words, so a two
          * component vector needs all four components of the packed slot:
          * write .xy (mask 0x3) from the first source component and .zw
          * (mask 0xc) from the second.
          */
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_double_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
            this->out_instructions->push_tail(
                  assign(t,  u2i(expr(ir_unop_unpack_double_2x32, swizzle_y(rhs))), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = u2i(expr(ir_unop_unpack_double_2x32, rhs));
         }
         break;
      case GLSL_TYPE_INT64:
         /* Same two-word-per-component layout as the double case; the
          * unpack result is already signed so no u2i is needed.
          */
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_unpack_int_2x32, swizzle_x(rhs->clone(mem_ctx, NULL))), 0x3));
            this->out_instructions->push_tail(
                  assign(t,  expr(ir_unop_unpack_int_2x32, swizzle_y(rhs)), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_unpack_int_2x32, rhs);
         }
         break;
      case GLSL_TYPE_UINT64:
         /* As the double case, but the unpack yields uvec2 so convert each
          * half with u2i before storing in the ivec4 temporary.
          */
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_uint_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
            this->out_instructions->push_tail(
                  assign(t,  u2i(expr(ir_unop_unpack_uint_2x32, swizzle_y(rhs))), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = u2i(expr(ir_unop_unpack_uint_2x32, rhs));
         }
         break;
      case GLSL_TYPE_SAMPLER:
         /* Bindless handles are 64-bit; unpack to two 32-bit words. */
         rhs = u2i(expr(ir_unop_unpack_sampler_2x32, rhs));
         break;
      case GLSL_TYPE_IMAGE:
         rhs = u2i(expr(ir_unop_unpack_image_2x32, rhs));
         break;
      default:
         assert(!"Unexpected type conversion while lowering varyings");
         break;
      }
   }
   this->out_instructions->push_tail(new (this->mem_ctx) ir_assignment(lhs, rhs));
}
/**
 * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
 * bitcasts if necessary to match up types.
 *
 * This function is called when unpacking varyings.
 */
void
lower_packed_varyings_visitor::bitwise_assign_unpack(ir_rvalue *lhs,
                                                     ir_rvalue *rhs)
{
   if (lhs->type->base_type != rhs->type->base_type) {
      /* Since we only mix types in flat varyings, and we always store flat
       * varyings as type ivec4, we need only produce conversions from int to
       * (uint or float).
       */
      assert(rhs->type->base_type == GLSL_TYPE_INT);
      switch (lhs->type->base_type) {
      case GLSL_TYPE_UINT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_i2u, lhs->type, rhs);
         break;
      case GLSL_TYPE_FLOAT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_bitcast_i2f, lhs->type, rhs);
         break;
      case GLSL_TYPE_DOUBLE:
         /* Inverse of the pack path: each destination 64-bit component is
          * rebuilt from two 32-bit words of the packed ivec4 — .x (mask 0x1)
          * from source .xy and .y (mask 0x2) from source .zw.
          */
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_double_2x32, i2u(rhs));
         }
         break;
      case GLSL_TYPE_INT64:
         /* Same layout as the double case; pack_int_2x32 takes the ivec2
          * directly, so no i2u conversion is needed.
          */
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_int_2x32, swizzle_xy(rhs->clone(mem_ctx, NULL))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_int_2x32, swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2)), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_int_2x32, rhs);
         }
         break;
      case GLSL_TYPE_UINT64:
         /* As the double case, but the word pairs must be converted to
          * uvec2 (i2u) before pack_uint_2x32.
          */
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_uint_2x32, i2u(rhs));
         }
         break;
      case GLSL_TYPE_SAMPLER:
         /* Rebuild the 64-bit bindless handle from the two packed words. */
         rhs = new(mem_ctx)
            ir_expression(ir_unop_pack_sampler_2x32, lhs->type, i2u(rhs));
         break;
      case GLSL_TYPE_IMAGE:
         rhs = new(mem_ctx)
            ir_expression(ir_unop_pack_image_2x32, lhs->type, i2u(rhs));
         break;
      default:
         assert(!"Unexpected type conversion while lowering varyings");
         break;
      }
   }
   this->out_instructions->push_tail(new(this->mem_ctx) ir_assignment(lhs, rhs));
}
/**
 * Recursively pack or unpack the given varying (or portion of a varying) by
 * traversing all of its constituent vectors.
 *
 * \param fine_location is the location where the first constituent vector
 * should be packed--the word "fine" indicates that this location is expressed
 * in multiples of a float, rather than multiples of a vec4 as is used
 * elsewhere in Mesa.
 *
 * \param gs_input_toplevel should be set to true if we are lowering geometry
 * shader inputs, and we are currently lowering the whole input variable
 * (i.e. we are lowering the array whose index selects the vertex).
 *
 * \param vertex_index: if we are lowering geometry shader inputs, and the
 * level of the array that we are currently lowering is *not* the top level,
 * then this indicates which vertex we are currently lowering.  Otherwise it
 * is ignored.
 *
 * \return the location where the next constituent vector (after this one)
 * should be packed.
 */
unsigned
lower_packed_varyings_visitor::lower_rvalue(ir_rvalue *rvalue,
                                            unsigned fine_location,
                                            ir_variable *unpacked_var,
                                            const char *name,
                                            bool gs_input_toplevel,
                                            unsigned vertex_index)
{
   /* 64-bit types occupy two 32-bit components per element. */
   unsigned dmul = rvalue->type->is_64bit() ? 2 : 1;
   /* When gs_input_toplevel is set, we should be looking at a geometry shader
    * input array.
    */
   assert(!gs_input_toplevel || rvalue->type->is_array());
   if (rvalue->type->is_struct()) {
      /* Structs are lowered field by field; each field gets a dotted name
       * ("var.field") for debug/resource-list purposes.
       */
      for (unsigned i = 0; i < rvalue->type->length; i++) {
         if (i != 0)
            rvalue = rvalue->clone(this->mem_ctx, NULL);
         const char *field_name = rvalue->type->fields.structure[i].name;
         ir_dereference_record *dereference_record = new(this->mem_ctx)
            ir_dereference_record(rvalue, field_name);
         char *deref_name
            = ralloc_asprintf(this->mem_ctx, "%s.%s", name, field_name);
         fine_location = this->lower_rvalue(dereference_record, fine_location,
                                            unpacked_var, deref_name, false,
                                            vertex_index);
      }
      return fine_location;
   } else if (rvalue->type->is_array()) {
      /* Arrays are packed/unpacked by considering each array element in
       * sequence.
       */
      return this->lower_arraylike(rvalue, rvalue->type->array_size(),
                                   fine_location, unpacked_var, name,
                                   gs_input_toplevel, vertex_index);
   } else if (rvalue->type->is_matrix()) {
      /* Matrices are packed/unpacked by considering each column vector in
       * sequence.
       */
      return this->lower_arraylike(rvalue, rvalue->type->matrix_columns,
                                   fine_location, unpacked_var, name,
                                   false, vertex_index);
   } else if (rvalue->type->vector_elements * dmul +
              fine_location % 4 > 4) {
      /* The vector does not fit in the remainder of the current slot. */
      /* We don't have code to split up 64bit variable between two
       * varying slots, instead we add padding if necessary.
       */
      unsigned aligned_fine_location = ALIGN_POT(fine_location, dmul);
      if (aligned_fine_location != fine_location) {
         return this->lower_rvalue(rvalue, aligned_fine_location,
                                   unpacked_var, name, false,
                                   vertex_index);
      }
      /* This vector is going to be "double parked" across two varying slots,
       * so handle it as two separate assignments. For doubles, a dvec3/dvec4
       * can end up being spread over 3 slots. However the second splitting
       * will happen later, here we just always want to split into 2.
       */
      unsigned left_components, right_components;
      unsigned left_swizzle_values[4] = { 0, 0, 0, 0 };
      unsigned right_swizzle_values[4] = { 0, 0, 0, 0 };
      char left_swizzle_name[4] = { 0, 0, 0, 0 };
      char right_swizzle_name[4] = { 0, 0, 0, 0 };
      /* Components that still fit in the current slot go "left"; the rest
       * spill into the next slot ("right").
       */
      left_components = 4 - fine_location % 4;
      if (rvalue->type->is_64bit()) {
         /* We intentionally don't support tightly packing doubles, so
          * halve the component count to stay in whole 64-bit elements.
          */
         left_components /= 2;
         assert(left_components > 0);
      }
      right_components = rvalue->type->vector_elements - left_components;
      for (unsigned i = 0; i < left_components; i++) {
         left_swizzle_values[i] = i;
         left_swizzle_name[i] = "xyzw"[i];
      }
      for (unsigned i = 0; i < right_components; i++) {
         right_swizzle_values[i] = i + left_components;
         right_swizzle_name[i] = "xyzw"[i + left_components];
      }
      ir_swizzle *right_swizzle = new(this->mem_ctx)
         ir_swizzle(rvalue->clone(this->mem_ctx, NULL), right_swizzle_values,
                    right_components);
      char *right_name
         = ralloc_asprintf(this->mem_ctx, "%s.%s", name, right_swizzle_name);
      if (left_components) {
         char *left_name
            = ralloc_asprintf(this->mem_ctx, "%s.%s", name, left_swizzle_name);
         ir_swizzle *left_swizzle = new(this->mem_ctx)
            ir_swizzle(rvalue, left_swizzle_values, left_components);
         fine_location = this->lower_rvalue(left_swizzle, fine_location,
                                            unpacked_var, left_name, false,
                                            vertex_index);
      } else
         /* Top up the fine location to the next slot */
         fine_location++;
      return this->lower_rvalue(right_swizzle, fine_location, unpacked_var,
                                right_name, false, vertex_index);
   } else {
      /* No special handling is necessary; pack the rvalue into the
       * varying.
       */
      unsigned swizzle_values[4] = { 0, 0, 0, 0 };
      unsigned components = rvalue->type->vector_elements * dmul;
      unsigned location = fine_location / 4;
      unsigned location_frac = fine_location % 4;
      for (unsigned i = 0; i < components; ++i)
         swizzle_values[i] = i + location_frac;
      assert(this->components[location - VARYING_SLOT_VAR0] >= components);
      ir_dereference *packed_deref =
         this->get_packed_varying_deref(location, unpacked_var, name,
                                        vertex_index);
      if (unpacked_var->data.stream != 0) {
         /* Record the GS stream of each packed component, two bits per
          * component, in the packed variable's stream field.
          */
         assert(unpacked_var->data.stream < 4);
         ir_variable *packed_var = packed_deref->variable_referenced();
         for (unsigned i = 0; i < components; ++i) {
            packed_var->data.stream |=
               unpacked_var->data.stream << (2 * (location_frac + i));
         }
      }
      ir_swizzle *swizzle = new(this->mem_ctx)
         ir_swizzle(packed_deref, swizzle_values, components);
      if (this->mode == ir_var_shader_out) {
         this->bitwise_assign_pack(swizzle, rvalue);
      } else {
         this->bitwise_assign_unpack(rvalue, swizzle);
      }
      return fine_location + components;
   }
}
/**
 * Recursively pack or unpack a varying for which we need to iterate over its
 * constituent elements, accessing each one using an ir_dereference_array.
 * This takes care of both arrays and matrices, since ir_dereference_array
 * treats a matrix like an array of its column vectors.
 *
 * \param gs_input_toplevel should be set to true if we are lowering geometry
 * shader inputs, and we are currently lowering the whole input variable
 * (i.e. we are lowering the array whose index selects the vertex).
 *
 * \param vertex_index: if we are lowering geometry shader inputs, and the
 * level of the array that we are currently lowering is *not* the top level,
 * then this indicates which vertex we are currently lowering.  Otherwise it
 * is ignored.
 */
unsigned
lower_packed_varyings_visitor::lower_arraylike(ir_rvalue *rvalue,
                                               unsigned array_size,
                                               unsigned fine_location,
                                               ir_variable *unpacked_var,
                                               const char *name,
                                               bool gs_input_toplevel,
                                               unsigned vertex_index)
{
   /* 64-bit elements occupy two 32-bit components each. */
   const unsigned dmul = rvalue->type->without_array()->is_64bit() ? 2 : 1;

   /* If the array would straddle a slot boundary, pad up to the next
    * element-aligned fine location first.
    */
   if (array_size * dmul + fine_location % 4 > 4)
      fine_location = ALIGN_POT(fine_location, dmul);

   for (unsigned elem = 0; elem < array_size; elem++) {
      /* Each iteration consumes the rvalue, so re-clone it after the first. */
      if (elem > 0)
         rvalue = rvalue->clone(this->mem_ctx, NULL);

      ir_constant *const elem_index = new(this->mem_ctx) ir_constant(elem);
      ir_dereference_array *const elem_deref = new(this->mem_ctx)
         ir_dereference_array(rvalue, elem_index);

      if (!gs_input_toplevel) {
         /* Ordinary case: each element lives at its own fine location and
          * gets a subscripted debug name.
          */
         char *elem_name
            = ralloc_asprintf(this->mem_ctx, "%s[%d]", name, elem);
         fine_location = this->lower_rvalue(elem_deref, fine_location,
                                            unpacked_var, elem_name,
                                            false, vertex_index);
      } else {
         /* Geometry shader inputs are a special case.  Instead of storing
          * each element of the array at a different location, all elements
          * are at the same location, but with a different vertex index.
          */
         (void) this->lower_rvalue(elem_deref, fine_location,
                                   unpacked_var, name, false, elem);
      }
   }
   return fine_location;
}
/**
 * Retrieve the packed varying corresponding to the given varying location.
 * If no packed varying has been created for the given varying location yet,
 * create it and add it to the shader before returning it.
 *
 * The newly created varying inherits its interpolation parameters from \c
 * unpacked_var.  Its base type is ivec4 if we are lowering a flat varying,
 * vec4 otherwise.
 *
 * \param vertex_index: if we are lowering geometry shader inputs, then this
 * indicates which vertex we are currently lowering.  Otherwise it is ignored.
 */
ir_dereference *
lower_packed_varyings_visitor::get_packed_varying_deref(
      unsigned location, ir_variable *unpacked_var, const char *name,
      unsigned vertex_index)
{
   unsigned slot = location - VARYING_SLOT_VAR0;
   assert(slot < locations_used);
   if (this->packed_varyings[slot] == NULL) {
      /* First use of this slot: create the packed variable. */
      char *packed_name = ralloc_asprintf(this->mem_ctx, "packed:%s", name);
      const glsl_type *packed_type;
      assert(components[slot] != 0);
      /* Flat varyings are stored as ints so mixed int/uint/float values can
       * share a slot; everything else is stored as floats.
       */
      if (unpacked_var->is_interpolation_flat())
         packed_type = glsl_type::get_instance(GLSL_TYPE_INT, components[slot], 1);
      else
         packed_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, components[slot], 1);
      /* GS inputs are per-vertex, so wrap the type in an array indexed by
       * vertex.
       */
      if (this->gs_input_vertices != 0) {
         packed_type =
            glsl_type::get_array_instance(packed_type,
                                          this->gs_input_vertices);
      }
      ir_variable *packed_var = new(this->mem_ctx)
         ir_variable(packed_type, packed_name, this->mode);
      if (this->gs_input_vertices != 0) {
         /* Prevent update_array_sizes() from messing with the size of the
          * array.
          */
         packed_var->data.max_array_access = this->gs_input_vertices - 1;
      }
      /* Inherit the interpolation qualifiers from the variable being packed. */
      packed_var->data.centroid = unpacked_var->data.centroid;
      packed_var->data.sample = unpacked_var->data.sample;
      packed_var->data.patch = unpacked_var->data.patch;
      packed_var->data.interpolation =
         packed_type->without_array() == glsl_type::ivec4_type
         ? unsigned(INTERP_MODE_FLAT) : unpacked_var->data.interpolation;
      packed_var->data.location = location;
      packed_var->data.precision = unpacked_var->data.precision;
      packed_var->data.always_active_io = unpacked_var->data.always_active_io;
      /* NOTE(review): 1u << 31 appears to act as a sentinel marking a packed
       * variable whose per-component stream bits are filled in later by
       * lower_rvalue() — confirm against the consumers of data.stream.
       */
      packed_var->data.stream = 1u << 31;
      unpacked_var->insert_before(packed_var);
      this->packed_varyings[slot] = packed_var;
   } else {
      ir_variable *var = this->packed_varyings[slot];
      /* The slot needs to be marked as always active if any variable that got
       * packed there was.
       */
      var->data.always_active_io |= unpacked_var->data.always_active_io;
      /* For geometry shader inputs, only update the packed variable name the
       * first time we visit each component.
       */
      if (this->gs_input_vertices == 0 || vertex_index == 0) {
         /* Append this varying's name so the packed variable's name lists
          * everything packed into it ("a,b,c").
          */
         if (var->is_name_ralloced())
            ralloc_asprintf_append((char **) &var->name, ",%s", name);
         else
            var->name = ralloc_asprintf(var, "%s,%s", var->name, name);
      }
   }
   ir_dereference *deref = new(this->mem_ctx)
      ir_dereference_variable(this->packed_varyings[slot]);
   if (this->gs_input_vertices != 0) {
      /* When lowering GS inputs, the packed variable is an array, so we need
       * to dereference it using vertex_index.
       */
      ir_constant *constant = new(this->mem_ctx) ir_constant(vertex_index);
      deref = new(this->mem_ctx) ir_dereference_array(deref, constant);
   }
   return deref;
}
/**
 * Determine whether \c var must be lowered (packed) by this pass.
 *
 * \param var   the candidate varying
 * \param stage the shader stage that owns \c var (used to strip per-vertex
 *              array levels from the type)
 * \return true if the varying should be packed, false otherwise
 */
bool
lower_packed_varyings_visitor::needs_lowering(ir_variable *var,
                                              gl_shader_stage stage)
{
   /* Things composed of vec4's, varyings with explicitly assigned
    * locations or varyings marked as must_be_shader_input (which might be used
    * by interpolateAt* functions) shouldn't be lowered. Everything else can be.
    */
   if (var->data.explicit_location || var->data.must_be_shader_input)
      return false;

   /* Fix: initialize directly from get_varying_type() — the previous
    * `type = var->type` initializer was a dead store, immediately
    * overwritten on the next line.
    */
   const glsl_type *type = get_varying_type(var, stage);

   /* Some drivers (e.g. panfrost) don't support packing of transform
    * feedback varyings.
    */
   if (disable_xfb_packing && var->data.is_xfb &&
       !(type->is_array() || type->is_struct() || type->is_matrix()) &&
       xfb_enabled)
      return false;

   /* Override disable_varying_packing if the var is only used by transform
    * feedback. Also override it if transform feedback is enabled and the
    * variable is an array, struct or matrix as the elements of these types
    * will always have the same interpolation and therefore are safe to pack.
    */
   if (disable_varying_packing && !var->data.is_xfb_only &&
       !((type->is_array() || type->is_struct() || type->is_matrix()) &&
         xfb_enabled))
      return false;

   /* Full vec4s (that are not 64-bit) already occupy a slot exactly and
    * need no packing.
    */
   type = type->without_array();
   if (type->vector_elements == 4 && !type->is_64bit())
      return false;
   return true;
}
/**
 * Visitor that splices varying packing code before every use of EmitVertex()
 * in a geometry shader.
 */
class lower_packed_varyings_gs_splicer : public ir_hierarchical_visitor
{
public:
   explicit lower_packed_varyings_gs_splicer(void *mem_ctx,
                                             const exec_list *instructions);
   virtual ir_visitor_status visit_leave(ir_emit_vertex *ev);
private:
   /** Memory context used to allocate the spliced instruction clones. */
   void * const mem_ctx;
   /** Packing instructions to clone in front of each EmitVertex() call. */
   const exec_list *instructions;
};

lower_packed_varyings_gs_splicer::lower_packed_varyings_gs_splicer(
      void *mem_ctx, const exec_list *instructions)
   : mem_ctx(mem_ctx), instructions(instructions)
{
}

/** Clone the packing code immediately before this EmitVertex() call. */
ir_visitor_status
lower_packed_varyings_gs_splicer::visit_leave(ir_emit_vertex *ev)
{
   /* Each EmitVertex() needs its own copy, so clone rather than move. */
   foreach_in_list(ir_instruction, inst, this->instructions) {
      ev->insert_before(inst->clone(this->mem_ctx, NULL));
   }
   return visit_continue;
}
/**
 * Visitor that splices varying packing code before every return.
 */
class lower_packed_varyings_return_splicer : public ir_hierarchical_visitor
{
public:
   explicit lower_packed_varyings_return_splicer(void *mem_ctx,
                                                 const exec_list *instructions);
   virtual ir_visitor_status visit_leave(ir_return *ret);
private:
   /** Memory context used to allocate the spliced instruction clones. */
   void * const mem_ctx;
   /** Packing instructions to clone in front of each return statement. */
   const exec_list *instructions;
};

lower_packed_varyings_return_splicer::lower_packed_varyings_return_splicer(
      void *mem_ctx, const exec_list *instructions)
   : mem_ctx(mem_ctx), instructions(instructions)
{
}

/** Clone the packing code immediately before this return statement. */
ir_visitor_status
lower_packed_varyings_return_splicer::visit_leave(ir_return *ret)
{
   /* Each return needs its own copy, so clone rather than move. */
   foreach_in_list(ir_instruction, inst, this->instructions) {
      ret->insert_before(inst->clone(this->mem_ctx, NULL));
   }
   return visit_continue;
}
/**
 * Entry point for the varying packing pass: lower all packable varyings of
 * \c mode in \c shader, then splice the generated pack/unpack code into the
 * appropriate places (before EmitVertex() for GS outputs, before returns and
 * at the end of main() for other outputs, at the top of main() for inputs).
 */
void
lower_packed_varyings(void *mem_ctx, unsigned locations_used,
                      const uint8_t *components,
                      ir_variable_mode mode, unsigned gs_input_vertices,
                      gl_linked_shader *shader, bool disable_varying_packing,
                      bool disable_xfb_packing, bool xfb_enabled)
{
   exec_list *instructions = shader->ir;
   ir_function *main_func = shader->symbols->get_function("main");
   exec_list void_parameters;
   ir_function_signature *main_func_sig
      = main_func->matching_signature(NULL, &void_parameters, false);
   /* The visitor collects new instructions/variables here instead of
    * inserting them directly; where they go depends on the stage and mode.
    */
   exec_list new_instructions, new_variables;
   lower_packed_varyings_visitor visitor(mem_ctx,
                                         locations_used,
                                         components,
                                         mode,
                                         gs_input_vertices,
                                         &new_instructions,
                                         &new_variables,
                                         disable_varying_packing,
                                         disable_xfb_packing,
                                         xfb_enabled);
   visitor.run(shader);
   if (mode == ir_var_shader_out) {
      if (shader->Stage == MESA_SHADER_GEOMETRY) {
         /* For geometry shaders, outputs need to be lowered before each call
          * to EmitVertex()
          */
         lower_packed_varyings_gs_splicer splicer(mem_ctx, &new_instructions);
         /* Add all the variables in first. */
         main_func_sig->body.get_head_raw()->insert_before(&new_variables);
         /* Now update all the EmitVertex instances */
         splicer.run(instructions);
      } else {
         /* For other shader types, outputs need to be lowered before each
          * return statement and at the end of main()
          */
         lower_packed_varyings_return_splicer splicer(mem_ctx, &new_instructions);
         main_func_sig->body.get_head_raw()->insert_before(&new_variables);
         splicer.run(instructions);
         /* Lower outputs at the end of main() if the last instruction is not
          * a return statement
          */
         if (((ir_instruction*)instructions->get_tail())->ir_type != ir_type_return) {
            main_func_sig->body.append_list(&new_instructions);
         }
      }
   } else {
      /* Shader inputs need to be lowered at the beginning of main() */
      main_func_sig->body.get_head_raw()->insert_before(&new_instructions);
      main_func_sig->body.get_head_raw()->insert_before(&new_variables);
   }
}

View file

@ -1,244 +0,0 @@
/*
* Copyright ©2019 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* \file lower_xfb_varying.cpp
*
*/
#include "ir.h"
#include "main/shader_types.h"
#include "glsl_symbol_table.h"
#include "util/strndup.h"
namespace {
/**
 * Visitor that splices varying packing code before every return.
 *
 * Clones the copy-back assignments for an xfb-only varying in front of every
 * point where the shader's outputs become visible: each return (vertex
 * shaders), each EmitVertex() (geometry shaders), and the end of main().
 */
class lower_xfb_var_splicer : public ir_hierarchical_visitor
{
public:
   explicit lower_xfb_var_splicer(void *mem_ctx,
                                  gl_shader_stage stage,
                                  const exec_list *instructions);
   /** Helper: clone all splice instructions before \c node. */
   ir_visitor_status append_instructions(exec_node *node);
   virtual ir_visitor_status visit_leave(ir_return *ret);
   virtual ir_visitor_status visit_leave(ir_function_signature *sig);
   virtual ir_visitor_status visit_leave(ir_emit_vertex *emit);
private:
   /**
    * Memory context used to allocate new instructions for the shader.
    */
   void * const mem_ctx;
   /** Shader stage being processed; controls which splice points apply. */
   gl_shader_stage stage;
   /**
    * Instructions that should be spliced into place before each return and EmitVertex().
    */
   const exec_list *instructions;
};
} /* anonymous namespace */

lower_xfb_var_splicer::lower_xfb_var_splicer(void *mem_ctx, gl_shader_stage stage,
                                             const exec_list *instructions)
   : mem_ctx(mem_ctx), stage(stage), instructions(instructions)
{
}
/** Clone every stored splice instruction immediately before \c node. */
ir_visitor_status
lower_xfb_var_splicer::append_instructions(exec_node *node)
{
   /* Clone (rather than move) so each splice point gets its own copy. */
   foreach_in_list(ir_instruction, inst, this->instructions) {
      node->insert_before(inst->clone(this->mem_ctx, NULL));
   }
   return visit_continue;
}
/** Splice the copy-back code before each return, but only in vertex shaders. */
ir_visitor_status
lower_xfb_var_splicer::visit_leave(ir_return *ret)
{
   return (stage == MESA_SHADER_VERTEX) ? append_instructions(ret)
                                        : visit_continue;
}
/** Splice the copy-back code before every EmitVertex() call. */
ir_visitor_status
lower_xfb_var_splicer::visit_leave(ir_emit_vertex *emit)
{
   return append_instructions(emit);
}
/** Insert a copy-back assignment at the end of the main() function */
ir_visitor_status
lower_xfb_var_splicer::visit_leave(ir_function_signature *sig)
{
   /* Only main() needs end-of-function splicing. */
   if (strcmp(sig->function_name(), "main") != 0)
      return visit_continue;
   if (this->stage == MESA_SHADER_VERTEX) {
      /* If main() already ends in a return, visit_leave(ir_return) has
       * handled it; avoid emitting the copy-back twice.
       */
      if (((ir_instruction*)sig->body.get_tail())->ir_type == ir_type_return)
         return visit_continue;
      foreach_in_list(ir_instruction, ir, this->instructions) {
         sig->body.push_tail(ir->clone(this->mem_ctx, NULL));
      }
   }
   return visit_continue;
}
/**
 * Return a heap-allocated copy of the leading identifier of \c name, i.e.
 * everything up to (but not including) the first '.' or '['.  If neither
 * appears, the whole string is copied.  Caller frees the result.
 */
static char*
get_field_name(const char *name)
{
   const char *dot = strchr(name, '.');
   const char *bracket = strchr(name, '[');
   size_t len;

   if (dot && (!bracket || dot < bracket))
      len = (size_t)(dot - name);       /* struct member comes first */
   else if (bracket)
      len = (size_t)(bracket - name);   /* array subscript comes first */
   else
      len = strlen(name);               /* plain identifier */

   /* Equivalent to strndup(name, len), spelled out with malloc/memcpy. */
   char *field = (char *) malloc(len + 1);
   if (field) {
      memcpy(field, name, len);
      field[len] = '\0';
   }
   return field;
}
/* Generate a new name given the old xfb declaration string by replacing dots
 * with '_', brackets with '@' and appending "-xfb" */
static char *
generate_new_name(void *mem_ctx, const char *name)
{
   char *new_name = ralloc_strdup(mem_ctx, name);

   /* Sanitize characters that are not valid in a plain identifier. */
   for (char *p = new_name; *p != '\0'; p++) {
      switch (*p) {
      case '.':
         *p = '_';
         break;
      case '[':
      case ']':
         *p = '@';
         break;
      default:
         break;
      }
   }

   /* Tag the name so it cannot collide with a user-declared varying. */
   if (!ralloc_strcat(&new_name, "-xfb")) {
      ralloc_free(new_name);
      return NULL;
   }
   return new_name;
}
/* Get the dereference for the given variable name. The method is called
 * recursively to parse array indices and struct members. */
static bool
get_deref(void *ctx,
          const char *name,
          struct gl_linked_shader *shader,
          ir_dereference **deref,
          const glsl_type **type)
{
   if (name[0] == '\0') {
      /* End of the name string: succeed iff we built some dereference. */
      return (*deref != NULL);
   } else if (name[0] == '[') {
      /* Array index: parse "[N]" and wrap the current deref in an
       * ir_dereference_array.
       */
      char *endptr = NULL;
      unsigned index;
      index = strtol(name + 1, &endptr, 10);
      assert(*type != NULL && (*type)->is_array() && endptr[0] == ']');
      *deref = new(ctx) ir_dereference_array(*deref, new(ctx) ir_constant(index));
      *type = (*type)->without_array();
      /* Continue after the closing ']'. */
      return get_deref(ctx, endptr + 1, shader, deref, type);
   } else if (name[0] == '.') {
      /* Struct member: extract the field name and wrap the current deref in
       * an ir_dereference_record.
       */
      char *field = get_field_name(name + 1);
      assert(*type != NULL && (*type)->is_struct() && field != NULL);
      *deref = new(ctx) ir_dereference_record(*deref, field);
      *type = (*type)->field_type(field);
      assert(*type != glsl_type::error_type);
      /* Skip past the '.' and the field name; free the strndup'd copy. */
      name += 1 + strlen(field);
      free(field);
      return get_deref(ctx, name, shader, deref, type);
   } else {
      /* Top level variable: look it up in the shader's symbol table. */
      char *field = get_field_name(name);
      ir_variable *toplevel_var;
      toplevel_var = shader->symbols->get_variable(field);
      name += strlen(field);
      free(field);
      if (toplevel_var == NULL) {
         /* Unknown variable name: fail the whole parse. */
         return false;
      }
      *deref = new (ctx) ir_dereference_variable(toplevel_var);
      *type = toplevel_var->type;
      return get_deref(ctx, name, shader, deref, type);
   }
}
/**
 * Create a dedicated "-xfb" output variable for the transform-feedback
 * declaration \c old_var_name (which may contain array subscripts and struct
 * member accesses), and splice assignments copying the original value into
 * it before every point where outputs become visible.
 *
 * \return the new output variable, or NULL if \c old_var_name does not
 *         resolve in \c shader.
 */
ir_variable *
lower_xfb_varying(void *mem_ctx,
                  struct gl_linked_shader *shader,
                  const char *old_var_name)
{
   exec_list new_instructions;
   char *new_var_name;
   ir_dereference *deref = NULL;
   const glsl_type *type = NULL;
   if (!get_deref(mem_ctx, old_var_name, shader, &deref, &type)) {
      if (deref) {
         /* NOTE(review): ir nodes are ralloc-allocated (freed with mem_ctx);
          * their operator delete is expected to be a no-op here — confirm.
          */
         delete deref;
      }
      return NULL;
   }
   new_var_name = generate_new_name(mem_ctx, old_var_name);
   ir_variable *new_variable
      = new(mem_ctx) ir_variable(type, new_var_name, ir_var_shader_out);
   /* Mark it used/assigned so later dead-code passes don't remove it. */
   new_variable->data.assigned = true;
   new_variable->data.used = true;
   shader->ir->push_head(new_variable);
   ralloc_free(new_var_name);
   /* Build "new_variable = <old deref>" and splice it before every return /
    * EmitVertex() / end of main() as appropriate for the stage.
    */
   ir_dereference *lhs = new(mem_ctx) ir_dereference_variable(new_variable);
   ir_assignment *new_assignment = new(mem_ctx) ir_assignment(lhs, deref);
   new_instructions.push_tail(new_assignment);
   lower_xfb_var_splicer splicer(mem_ctx, shader->Stage, &new_instructions);
   visit_list_elements(&splicer, shader->ir);
   return new_variable;
}

View file

@ -171,7 +171,6 @@ files_libglsl = files(
'lower_jumps.cpp',
'lower_mat_op_to_vec.cpp',
'lower_offset_array.cpp',
'lower_packed_varyings.cpp',
'lower_named_interface_blocks.cpp',
'lower_packing_builtins.cpp',
'lower_precision.cpp',
@ -184,7 +183,6 @@ files_libglsl = files(
'lower_vertex_id.cpp',
'lower_output_reads.cpp',
'lower_shared_reference.cpp',
'lower_xfb_varying.cpp',
'opt_algebraic.cpp',
'opt_array_splitting.cpp',
'opt_conditional_discard.cpp',
@ -193,7 +191,6 @@ files_libglsl = files(
'opt_constant_variable.cpp',
'opt_copy_propagation_elements.cpp',
'opt_dead_builtin_variables.cpp',
'opt_dead_builtin_varyings.cpp',
'opt_dead_code.cpp',
'opt_dead_code_local.cpp',
'opt_dead_functions.cpp',

View file

@ -1,621 +0,0 @@
/*
* Copyright © 2013 Marek Olšák <maraeo@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* \file opt_dead_builtin_varyings.cpp
*
* This eliminates the built-in shader outputs which are either not written
* at all or not used by the next stage. It also eliminates unused elements
* of gl_TexCoord inputs, which reduces the overall varying usage.
* The varyings handled here are the primary and secondary color, the fog,
* and the texture coordinates (gl_TexCoord).
*
* This pass is necessary, because the Mesa GLSL linker cannot eliminate
* built-in varyings like it eliminates user-defined varyings, because
* the built-in varyings have pre-assigned locations. Also, the elimination
* of unused gl_TexCoord elements requires its own lowering pass anyway.
*
* It's implemented by replacing all occurrences of dead varyings with
* temporary variables, which creates dead code. It is recommended to run
* a dead-code elimination pass after this.
*
* If any texture coordinate slots can be eliminated, the gl_TexCoord array is
* broken down into separate vec4 variables with locations equal to
* VARYING_SLOT_TEX0 + i.
*
* The same is done for the gl_FragData fragment shader output.
*/
#include "ir.h"
#include "ir_rvalue_visitor.h"
#include "ir_optimization.h"
#include "ir_print_visitor.h"
#include "compiler/glsl_types.h"
#include "link_varyings.h"
#include "main/consts_exts.h"
#include "main/shader_types.h"
#include "util/u_string.h"
namespace {
/**
* This obtains detailed information about built-in varyings from shader code.
*/
/**
 * Hierarchical visitor that records how the built-in varyings (colors,
 * back colors, fog, gl_TexCoord, gl_FragData) are used by a shader.
 *
 * The collected usage bitmasks and "lower_*_array" flags drive
 * replace_varyings_visitor below.
 */
class varying_info_visitor : public ir_hierarchical_visitor {
public:
   /* "mode" can be either ir_var_shader_in or ir_var_shader_out */
   varying_info_visitor(ir_variable_mode mode, bool find_frag_outputs = false)
      : lower_texcoord_array(true),
        texcoord_array(NULL),
        texcoord_usage(0),
        find_frag_outputs(find_frag_outputs),
        lower_fragdata_array(true),
        fragdata_array(NULL),
        fragdata_usage(0),
        color_usage(0),
        tfeedback_color_usage(0),
        fog(NULL),
        has_fog(false),
        tfeedback_has_fog(false),
        mode(mode)
   {
      memset(color, 0, sizeof(color));
      memset(backcolor, 0, sizeof(backcolor));
   }

   /* Record per-element usage of gl_TexCoord[i] / gl_FragData[i]. */
   virtual ir_visitor_status visit_enter(ir_dereference_array *ir)
   {
      ir_variable *var = ir->variable_referenced();
      if (!var || var->data.mode != this->mode || !var->type->is_array() ||
          !is_gl_identifier(var->name))
         return visit_continue;
      /* Only match gl_FragData[], not gl_SecondaryFragDataEXT[] or
       * gl_LastFragData[].
       */
      if (this->find_frag_outputs && strcmp(var->name, "gl_FragData") == 0) {
         this->fragdata_array = var;
         ir_constant *index = ir->array_index->as_constant();
         if (index == NULL) {
            /* This is variable indexing. */
            this->fragdata_usage |= (1 << var->type->array_size()) - 1;
            this->lower_fragdata_array = false;
         }
         else {
            this->fragdata_usage |= 1 << index->get_uint_component(0);
            /* Don't lower fragdata array if the output variable
             * is not a float variable (or float vector) because it will
             * generate wrong register assignments because of different
             * data types.
             */
            if (var->type->gl_type != GL_FLOAT &&
                var->type->gl_type != GL_FLOAT_VEC2 &&
                var->type->gl_type != GL_FLOAT_VEC3 &&
                var->type->gl_type != GL_FLOAT_VEC4)
               this->lower_fragdata_array = false;
         }
         /* Don't visit the leaves of ir_dereference_array. */
         return visit_continue_with_parent;
      }
      if (!this->find_frag_outputs && var->data.location == VARYING_SLOT_TEX0) {
         this->texcoord_array = var;
         ir_constant *index = ir->array_index->as_constant();
         if (index == NULL) {
            /* There is variable indexing, we can't lower the texcoord array.
             */
            this->texcoord_usage |= (1 << var->type->array_size()) - 1;
            this->lower_texcoord_array = false;
         }
         else {
            this->texcoord_usage |= 1 << index->get_uint_component(0);
         }
         /* Don't visit the leaves of ir_dereference_array. */
         return visit_continue_with_parent;
      }
      return visit_continue;
   }

   /* Whole-array references force all elements used and block lowering. */
   virtual ir_visitor_status visit(ir_dereference_variable *ir)
   {
      ir_variable *var = ir->variable_referenced();
      if (var->data.mode != this->mode || !var->type->is_array())
         return visit_continue;
      if (this->find_frag_outputs && var->data.location == FRAG_RESULT_DATA0 &&
          var->data.index == 0) {
         /* This is a whole array dereference. */
         this->fragdata_usage |= (1 << var->type->array_size()) - 1;
         this->lower_fragdata_array = false;
         return visit_continue;
      }
      if (!this->find_frag_outputs && var->data.location == VARYING_SLOT_TEX0) {
         /* This is a whole array dereference like "gl_TexCoord = x;",
          * there's probably no point in lowering that.
          */
         this->texcoord_usage |= (1 << var->type->array_size()) - 1;
         this->lower_texcoord_array = false;
      }
      return visit_continue;
   }

   /* Record declarations of the color/back-color/fog built-ins. */
   virtual ir_visitor_status visit(ir_variable *var)
   {
      if (var->data.mode != this->mode)
         return visit_continue;
      /* Nothing to do here for fragment outputs. */
      if (this->find_frag_outputs)
         return visit_continue;
      /* Handle colors and fog. */
      switch (var->data.location) {
      case VARYING_SLOT_COL0:
         this->color[0] = var;
         this->color_usage |= 1;
         break;
      case VARYING_SLOT_COL1:
         this->color[1] = var;
         this->color_usage |= 2;
         break;
      case VARYING_SLOT_BFC0:
         this->backcolor[0] = var;
         this->color_usage |= 1;
         break;
      case VARYING_SLOT_BFC1:
         this->backcolor[1] = var;
         this->color_usage |= 2;
         break;
      case VARYING_SLOT_FOGC:
         this->fog = var;
         this->has_fog = true;
         break;
      }
      return visit_continue;
   }

   /* Entry point: fold in transform-feedback usage, then scan the IR.
    * Varyings captured by transform feedback must be preserved, so they
    * count as "used" here even if the consumer never reads them.
    */
   void get(exec_list *ir,
            unsigned num_tfeedback_decls,
            tfeedback_decl *tfeedback_decls)
   {
      /* Handle the transform feedback varyings. */
      for (unsigned i = 0; i < num_tfeedback_decls; i++) {
         if (!tfeedback_decls[i].is_varying())
            continue;
         unsigned location = tfeedback_decls[i].get_location();
         switch (location) {
         case VARYING_SLOT_COL0:
         case VARYING_SLOT_BFC0:
            this->tfeedback_color_usage |= 1;
            break;
         case VARYING_SLOT_COL1:
         case VARYING_SLOT_BFC1:
            this->tfeedback_color_usage |= 2;
            break;
         case VARYING_SLOT_FOGC:
            this->tfeedback_has_fog = true;
            break;
         default:
            if (location >= VARYING_SLOT_TEX0 &&
                location <= VARYING_SLOT_TEX7) {
               this->lower_texcoord_array = false;
            }
         }
      }
      /* Process the shader. */
      visit_list_elements(this, ir);
      /* No array declared at all means there is nothing to lower. */
      if (!this->texcoord_array) {
         this->lower_texcoord_array = false;
      }
      if (!this->fragdata_array) {
         this->lower_fragdata_array = false;
      }
   }

   bool lower_texcoord_array;   /* split gl_TexCoord into scalars? */
   ir_variable *texcoord_array;
   unsigned texcoord_usage; /* bitmask */
   bool find_frag_outputs; /* false if it's looking for varyings */
   bool lower_fragdata_array;   /* split gl_FragData into scalars? */
   ir_variable *fragdata_array;
   unsigned fragdata_usage; /* bitmask */
   ir_variable *color[2];
   ir_variable *backcolor[2];
   unsigned color_usage; /* bitmask */
   unsigned tfeedback_color_usage; /* bitmask */
   ir_variable *fog;
   bool has_fog;
   bool tfeedback_has_fog;
   ir_variable_mode mode;
};
/**
* This replaces unused varyings with temporary variables.
*
* If "ir" is the producer, the "external" usage should come from
* the consumer. It also works the other way around. If either one is
* missing, set the "external" usage to a full mask.
*/
/**
 * This replaces unused varyings with temporary variables.
 *
 * If "ir" is the producer, the "external" usage should come from
 * the consumer. It also works the other way around. If either one is
 * missing, set the "external" usage to a full mask.
 *
 * All the work happens in the constructor: it declares replacement
 * variables (real varyings for used slots, dead temporaries for unused
 * ones) and then rewrites every reference in the shader.
 */
class replace_varyings_visitor : public ir_rvalue_visitor {
public:
   replace_varyings_visitor(struct gl_linked_shader *sha,
                            const varying_info_visitor *info,
                            unsigned external_texcoord_usage,
                            unsigned external_color_usage,
                            bool external_has_fog)
      : shader(sha), info(info), new_fog(NULL)
   {
      void *const ctx = shader->ir;
      memset(this->new_fragdata, 0, sizeof(this->new_fragdata));
      memset(this->new_texcoord, 0, sizeof(this->new_texcoord));
      memset(this->new_color, 0, sizeof(this->new_color));
      memset(this->new_backcolor, 0, sizeof(this->new_backcolor));
      const char *mode_str =
         info->mode == ir_var_shader_in ? "in" : "out";
      /* Handle texcoord outputs.
       *
       * We're going to break down the gl_TexCoord array into separate
       * variables. First, add declarations of the new variables all
       * occurrences of gl_TexCoord will be replaced with.
       */
      if (info->lower_texcoord_array) {
         prepare_array(shader->ir, this->new_texcoord,
                       ARRAY_SIZE(this->new_texcoord),
                       VARYING_SLOT_TEX0, "TexCoord", mode_str,
                       info->texcoord_usage, external_texcoord_usage);
      }
      /* Handle gl_FragData in the same way like gl_TexCoord. */
      if (info->lower_fragdata_array) {
         prepare_array(shader->ir, this->new_fragdata,
                       ARRAY_SIZE(this->new_fragdata),
                       FRAG_RESULT_DATA0, "FragData", mode_str,
                       info->fragdata_usage, (1 << MAX_DRAW_BUFFERS) - 1);
      }
      /* Create dummy variables which will replace set-but-unused color and
       * fog outputs.
       */
      external_color_usage |= info->tfeedback_color_usage;
      for (int i = 0; i < 2; i++) {
         char name[32];
         if (!(external_color_usage & (1 << i))) {
            if (info->color[i]) {
               snprintf(name, 32, "gl_%s_FrontColor%i_dummy", mode_str, i);
               this->new_color[i] =
                  new (ctx) ir_variable(glsl_type::vec4_type, name,
                                        ir_var_temporary);
            }
            if (info->backcolor[i]) {
               snprintf(name, 32, "gl_%s_BackColor%i_dummy", mode_str, i);
               this->new_backcolor[i] =
                  new (ctx) ir_variable(glsl_type::vec4_type, name,
                                        ir_var_temporary);
            }
         }
      }
      if (!external_has_fog && !info->tfeedback_has_fog &&
          info->fog) {
         char name[32];
         snprintf(name, 32, "gl_%s_FogFragCoord_dummy", mode_str);
         this->new_fog = new (ctx) ir_variable(glsl_type::float_type, name,
                                               ir_var_temporary);
      }
      /* Now do the replacing. */
      visit_list_elements(this, shader->ir);
   }

   /* Declare one replacement variable per used array element: a real
    * varying at start_location+i when the other stage uses it, otherwise
    * a "_dummy" temporary that later DCE can delete.
    */
   void prepare_array(exec_list *ir,
                      ir_variable **new_var,
                      int max_elements, unsigned start_location,
                      const char *var_name, const char *mode_str,
                      unsigned usage, unsigned external_usage)
   {
      void *const ctx = ir;
      for (int i = max_elements-1; i >= 0; i--) {
         if (usage & (1 << i)) {
            char name[32];
            if (!(external_usage & (1 << i))) {
               /* This varying is unused in the next stage. Declare
                * a temporary instead of an output. */
               snprintf(name, 32, "gl_%s_%s%i_dummy", mode_str, var_name, i);
               new_var[i] =
                  new (ctx) ir_variable(glsl_type::vec4_type, name,
                                        ir_var_temporary);
            }
            else {
               snprintf(name, 32, "gl_%s_%s%i", mode_str, var_name, i);
               new_var[i] =
                  new(ctx) ir_variable(glsl_type::vec4_type, name,
                                       this->info->mode);
               new_var[i]->data.location = start_location + i;
               new_var[i]->data.explicit_location = true;
               new_var[i]->data.explicit_index = 0;
            }
            ir->get_head_raw()->insert_before(new_var[i]);
         }
      }
   }

   /* Rewrite the declarations themselves. */
   virtual ir_visitor_status visit(ir_variable *var)
   {
      /* Remove the gl_TexCoord array. */
      if (this->info->lower_texcoord_array &&
          var == this->info->texcoord_array) {
         var->remove();
      }
      /* Remove the gl_FragData array. */
      if (this->info->lower_fragdata_array &&
          var == this->info->fragdata_array) {
         /* Clone variable for program resource list before it is removed. */
         if (!shader->fragdata_arrays)
            shader->fragdata_arrays = new (shader) exec_list;
         shader->fragdata_arrays->push_tail(var->clone(shader, NULL));
         var->remove();
      }
      /* Replace set-but-unused color and fog outputs with dummy variables. */
      for (int i = 0; i < 2; i++) {
         if (var == this->info->color[i] && this->new_color[i]) {
            var->replace_with(this->new_color[i]);
         }
         if (var == this->info->backcolor[i] &&
             this->new_backcolor[i]) {
            var->replace_with(this->new_backcolor[i]);
         }
      }
      if (var == this->info->fog && this->new_fog) {
         var->replace_with(this->new_fog);
      }
      return visit_continue;
   }

   /* Rewrite every use of a replaced varying. */
   virtual void handle_rvalue(ir_rvalue **rvalue)
   {
      if (!*rvalue)
         return;
      void *ctx = ralloc_parent(*rvalue);
      /* Replace an array dereference gl_TexCoord[i] with a single
       * variable dereference representing gl_TexCoord[i].
       */
      if (this->info->lower_texcoord_array) {
         /* gl_TexCoord[i] occurrence */
         ir_dereference_array *const da = (*rvalue)->as_dereference_array();
         if (da && da->variable_referenced() ==
             this->info->texcoord_array) {
            /* Index is known constant here: variable indexing cleared
             * lower_texcoord_array in the info pass.
             */
            unsigned i = da->array_index->as_constant()->get_uint_component(0);
            *rvalue = new(ctx) ir_dereference_variable(this->new_texcoord[i]);
            return;
         }
      }
      /* Same for gl_FragData. */
      if (this->info->lower_fragdata_array) {
         /* gl_FragData[i] occurrence */
         ir_dereference_array *const da = (*rvalue)->as_dereference_array();
         if (da && da->variable_referenced() == this->info->fragdata_array) {
            unsigned i = da->array_index->as_constant()->get_uint_component(0);
            *rvalue = new(ctx) ir_dereference_variable(this->new_fragdata[i]);
            return;
         }
      }
      /* Replace set-but-unused color and fog outputs with dummy variables. */
      ir_dereference_variable *const dv = (*rvalue)->as_dereference_variable();
      if (!dv)
         return;
      ir_variable *var = dv->variable_referenced();
      for (int i = 0; i < 2; i++) {
         if (var == this->info->color[i] && this->new_color[i]) {
            *rvalue = new(ctx) ir_dereference_variable(this->new_color[i]);
            return;
         }
         if (var == this->info->backcolor[i] &&
             this->new_backcolor[i]) {
            *rvalue = new(ctx) ir_dereference_variable(this->new_backcolor[i]);
            return;
         }
      }
      if (var == this->info->fog && this->new_fog) {
         *rvalue = new(ctx) ir_dereference_variable(this->new_fog);
      }
   }

   virtual ir_visitor_status visit_leave(ir_assignment *ir)
   {
      handle_rvalue(&ir->rhs);
      /* We have to use set_lhs when changing the LHS of an assignment. */
      ir_rvalue *lhs = ir->lhs;
      handle_rvalue(&lhs);
      if (lhs != ir->lhs) {
         ir->set_lhs(lhs);
      }
      return visit_continue;
   }

private:
   struct gl_linked_shader *shader;
   const varying_info_visitor *info;
   ir_variable *new_fragdata[MAX_DRAW_BUFFERS];
   ir_variable *new_texcoord[MAX_TEXTURE_COORD_UNITS];
   ir_variable *new_color[2];
   ir_variable *new_backcolor[2];
   ir_variable *new_fog;
};
} /* anonymous namespace */
/* Break the shader's gl_TexCoord array into separate vec4 variables,
 * treating every texcoord slot, both colors, and fog as externally used
 * (so nothing is turned into a dead dummy here).
 */
static void
lower_texcoord_array(struct gl_linked_shader *shader, const varying_info_visitor *info)
{
   const unsigned all_texcoords = (1 << MAX_TEXTURE_COORD_UNITS) - 1;
   const unsigned both_colors = 1 | 2;
   replace_varyings_visitor(shader, info, all_texcoords, both_colors,
                            true /* external_has_fog */);
}
static void
lower_fragdata_array(struct gl_linked_shader *shader)
{
varying_info_visitor info(ir_var_shader_out, true);
info.get(shader->ir, 0, NULL);
replace_varyings_visitor(shader, &info, 0, 0, false);
}
/**
 * Eliminate built-in varyings (colors, fog, gl_TexCoord) that are written
 * by \p producer but never read by \p consumer, and vice versa, and lower
 * the gl_TexCoord / gl_FragData arrays to separate variables.
 *
 * Either \p producer or \p consumer may be NULL (first/last stage of the
 * pipeline); transform-feedback captured varyings are kept alive via
 * \p tfeedback_decls.
 */
void
do_dead_builtin_varyings(const struct gl_constants *consts,
                         gl_api api,
                         gl_linked_shader *producer,
                         gl_linked_shader *consumer,
                         unsigned num_tfeedback_decls,
                         tfeedback_decl *tfeedback_decls)
{
   /* Lower the gl_FragData array to separate variables. */
   if (consumer && consumer->Stage == MESA_SHADER_FRAGMENT &&
       !consts->ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions) {
      lower_fragdata_array(consumer);
   }
   /* Lowering of built-in varyings has no effect with the core context and
    * GLES2, because they are not available there.
    */
   if (api == API_OPENGL_CORE ||
       api == API_OPENGLES2) {
      return;
   }
   /* Information about built-in varyings. */
   varying_info_visitor producer_info(ir_var_shader_out);
   varying_info_visitor consumer_info(ir_var_shader_in);
   if (producer) {
      producer_info.get(producer->ir, num_tfeedback_decls, tfeedback_decls);
      if (producer->Stage == MESA_SHADER_TESS_CTRL)
         producer_info.lower_texcoord_array = false;
      if (!consumer) {
         /* At least eliminate unused gl_TexCoord elements. */
         if (producer_info.lower_texcoord_array) {
            lower_texcoord_array(producer, &producer_info);
         }
         return;
      }
   }
   if (consumer) {
      consumer_info.get(consumer->ir, 0, NULL);
      if (consumer->Stage != MESA_SHADER_FRAGMENT)
         consumer_info.lower_texcoord_array = false;
      if (!producer) {
         /* At least eliminate unused gl_TexCoord elements. */
         if (consumer_info.lower_texcoord_array) {
            lower_texcoord_array(consumer, &consumer_info);
         }
         return;
      }
   }
   /* Past this point both producer and consumer are non-NULL (the NULL
    * cases returned above).
    */
   /* Eliminate the outputs unused by the consumer. */
   if (producer_info.lower_texcoord_array ||
       producer_info.color_usage ||
       producer_info.has_fog) {
      replace_varyings_visitor(producer,
                               &producer_info,
                               consumer_info.texcoord_usage,
                               consumer_info.color_usage,
                               consumer_info.has_fog);
   }
   /* The gl_TexCoord fragment shader inputs can be initialized
    * by GL_COORD_REPLACE, so we can't eliminate them.
    *
    * This doesn't prevent elimination of the gl_TexCoord elements which
    * are not read by the fragment shader. We want to eliminate those anyway.
    */
   if (consumer->Stage == MESA_SHADER_FRAGMENT) {
      producer_info.texcoord_usage = (1 << MAX_TEXTURE_COORD_UNITS) - 1;
   }
   /* Eliminate the inputs uninitialized by the producer. */
   if (consumer_info.lower_texcoord_array ||
       consumer_info.color_usage ||
       consumer_info.has_fog) {
      replace_varyings_visitor(consumer,
                               &consumer_info,
                               producer_info.texcoord_usage,
                               producer_info.color_usage,
                               producer_info.has_fog);
   }
}

View file

@ -44,8 +44,4 @@ _mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
extern void
link_shaders(struct gl_context *ctx, struct gl_shader_program *prog);
extern void
build_program_resource_list(const struct gl_constants *consts,
struct gl_shader_program *shProg);
#endif /* GLSL_PROGRAM_H */

View file

@ -1,199 +0,0 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <gtest/gtest.h>
#include "util/compiler.h"
#include "main/macros.h"
#include "util/ralloc.h"
#include "ir.h"
#include "linker.h"
/**
* \file varyings_test.cpp
*
* Test various aspects of linking shader stage inputs and outputs.
*/
/* Fixture for link_invalidate_variable_locations() tests: provides a
 * ralloc context and an empty IR list, both reset per test.
 */
class invalidate_locations : public ::testing::Test {
public:
   virtual void SetUp();
   virtual void TearDown();
   void *mem_ctx;   /* ralloc context, freed in TearDown */
   exec_list ir;    /* IR list handed to the function under test */
};
void
invalidate_locations::SetUp()
{
   /* Ref-count the glsl_type singleton so glsl_type::vec() is usable. */
   glsl_type_singleton_init_or_ref();
   this->mem_ctx = ralloc_context(NULL);
   this->ir.make_empty();
}
void
invalidate_locations::TearDown()
{
   /* Frees every ir_variable allocated from mem_ctx in the tests. */
   ralloc_free(this->mem_ctx);
   this->mem_ctx = NULL;
   glsl_type_singleton_decref();
}
TEST_F(invalidate_locations, simple_vertex_in_generic)
{
   /* A generic input whose location was linker-assigned (no explicit
    * layout) must be reset to unassigned and flagged as unmatched.
    */
   ir_variable *const attrib =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_in);
   EXPECT_FALSE(attrib->data.explicit_location);
   EXPECT_EQ(-1, attrib->data.location);
   attrib->data.location = VERT_ATTRIB_GENERIC0;
   attrib->data.location_frac = 2;
   ir.push_tail(attrib);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(-1, attrib->data.location);
   EXPECT_EQ(0u, attrib->data.location_frac);
   EXPECT_FALSE(attrib->data.explicit_location);
   EXPECT_TRUE(attrib->data.is_unmatched_generic_inout);
}
TEST_F(invalidate_locations, explicit_location_vertex_in_generic)
{
   /* An explicitly-assigned location must survive invalidation intact. */
   ir_variable *const attrib =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_in);
   EXPECT_FALSE(attrib->data.explicit_location);
   EXPECT_EQ(-1, attrib->data.location);
   attrib->data.location = VERT_ATTRIB_GENERIC0;
   attrib->data.explicit_location = true;
   ir.push_tail(attrib);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(VERT_ATTRIB_GENERIC0, attrib->data.location);
   EXPECT_EQ(0u, attrib->data.location_frac);
   EXPECT_TRUE(attrib->data.explicit_location);
   EXPECT_FALSE(attrib->data.is_unmatched_generic_inout);
}
TEST_F(invalidate_locations, explicit_location_frac_vertex_in_generic)
{
   /* With an explicit location, the component offset (location_frac)
    * must also be preserved by invalidation.
    */
   ir_variable *const attrib =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_in);
   EXPECT_FALSE(attrib->data.explicit_location);
   EXPECT_EQ(-1, attrib->data.location);
   attrib->data.location = VERT_ATTRIB_GENERIC0;
   attrib->data.location_frac = 2;
   attrib->data.explicit_location = true;
   ir.push_tail(attrib);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(VERT_ATTRIB_GENERIC0, attrib->data.location);
   EXPECT_EQ(2u, attrib->data.location_frac);
   EXPECT_TRUE(attrib->data.explicit_location);
   EXPECT_FALSE(attrib->data.is_unmatched_generic_inout);
}
TEST_F(invalidate_locations, vertex_in_builtin)
{
   /* Built-in inputs (gl_Vertex) keep their pre-assigned location. */
   ir_variable *const builtin =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "gl_Vertex",
                               ir_var_shader_in);
   EXPECT_FALSE(builtin->data.explicit_location);
   EXPECT_EQ(-1, builtin->data.location);
   builtin->data.location = VERT_ATTRIB_POS;
   builtin->data.explicit_location = true;
   ir.push_tail(builtin);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(VERT_ATTRIB_POS, builtin->data.location);
   EXPECT_EQ(0u, builtin->data.location_frac);
   EXPECT_TRUE(builtin->data.explicit_location);
   EXPECT_FALSE(builtin->data.is_unmatched_generic_inout);
}
TEST_F(invalidate_locations, simple_vertex_out_generic)
{
   /* Generic outputs behave like generic inputs: a linker-assigned
    * varying slot is cleared and the variable marked unmatched.
    */
   ir_variable *const out_var =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_out);
   EXPECT_FALSE(out_var->data.explicit_location);
   EXPECT_EQ(-1, out_var->data.location);
   out_var->data.location = VARYING_SLOT_VAR0;
   ir.push_tail(out_var);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(-1, out_var->data.location);
   EXPECT_EQ(0u, out_var->data.location_frac);
   EXPECT_FALSE(out_var->data.explicit_location);
   EXPECT_TRUE(out_var->data.is_unmatched_generic_inout);
}
TEST_F(invalidate_locations, vertex_out_builtin)
{
   /* Built-in outputs (gl_FrontColor) keep their pre-assigned slot. */
   ir_variable *const builtin =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "gl_FrontColor",
                               ir_var_shader_out);
   EXPECT_FALSE(builtin->data.explicit_location);
   EXPECT_EQ(-1, builtin->data.location);
   builtin->data.location = VARYING_SLOT_COL0;
   builtin->data.explicit_location = true;
   ir.push_tail(builtin);
   link_invalidate_variable_locations(&ir);
   EXPECT_EQ(VARYING_SLOT_COL0, builtin->data.location);
   EXPECT_EQ(0u, builtin->data.location_frac);
   EXPECT_TRUE(builtin->data.explicit_location);
   EXPECT_FALSE(builtin->data.is_unmatched_generic_inout);
}

View file

@ -23,9 +23,8 @@ test(
executable(
'general_ir_test',
['array_refcount_test.cpp', 'builtin_variable_test.cpp',
'invalidate_locations_test.cpp', 'general_ir_test.cpp',
'lower_int64_test.cpp', 'opt_add_neg_to_sub_test.cpp',
'varyings_test.cpp', ir_expression_operation_h],
'general_ir_test.cpp', 'lower_int64_test.cpp',
'opt_add_neg_to_sub_test.cpp', ir_expression_operation_h],
cpp_args : [cpp_msvc_compat_args],
gnu_symbol_visibility : 'hidden',
include_directories : [inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux, inc_glsl],

View file

@ -1,330 +0,0 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <gtest/gtest.h>
#include "util/compiler.h"
#include "main/macros.h"
#include "util/ralloc.h"
#include "ir.h"
#include "util/hash_table.h"
/**
* \file varyings_test.cpp
*
* Test various aspects of linking shader stage inputs and outputs.
*/
namespace linker {
/* Prototypes of the linker-internal helpers under test; their
 * definitions live in link_varyings.cpp and are not exposed in a header.
 */
void
populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
                             hash_table *consumer_inputs,
                             hash_table *consumer_interface_inputs,
                             ir_variable *consumer_inputs_with_locations[VARYING_SLOT_MAX]);
ir_variable *
get_matching_input(void *mem_ctx,
                   const ir_variable *output_var,
                   hash_table *consumer_inputs,
                   hash_table *consumer_interface_inputs,
                   ir_variable *consumer_inputs_with_locations[VARYING_SLOT_MAX]);
}
/* Fixture for populate_consumer_input_sets()/get_matching_input() tests:
 * provides a ralloc context, an IR list, the two consumer hash tables,
 * and a one-member interface block type.
 */
class link_varyings : public ::testing::Test {
public:
   virtual void SetUp();
   virtual void TearDown();
   /* Build the "block.field" key used for interface-block inputs. */
   char *interface_field_name(const glsl_type *iface, unsigned field = 0)
   {
      return ralloc_asprintf(mem_ctx,
                             "%s.%s",
                             iface->name,
                             iface->fields.structure[field].name);
   }
   void *mem_ctx;
   exec_list ir;
   hash_table *consumer_inputs;             /* name -> plain input var */
   hash_table *consumer_interface_inputs;   /* "block.field" -> input var */
   const glsl_type *simple_interface;
   ir_variable *junk[VARYING_SLOT_TESS_MAX]; /* inputs by explicit location */
};
void
link_varyings::SetUp()
{
   /* Ref-count the glsl_type singleton before any type is built. */
   glsl_type_singleton_init_or_ref();
   this->mem_ctx = ralloc_context(NULL);
   this->ir.make_empty();
   this->consumer_inputs =
      _mesa_hash_table_create(NULL, _mesa_hash_string,
                              _mesa_key_string_equal);
   this->consumer_interface_inputs =
      _mesa_hash_table_create(NULL, _mesa_hash_string,
                              _mesa_key_string_equal);
   /* Needs to happen after glsl type initialization */
   static const glsl_struct_field f[] = {
      glsl_struct_field(glsl_type::vec(4), "v")
   };
   this->simple_interface =
      glsl_type::get_interface_instance(f,
                                        ARRAY_SIZE(f),
                                        GLSL_INTERFACE_PACKING_STD140,
                                        false,
                                        "simple_interface");
}
void
link_varyings::TearDown()
{
   ralloc_free(this->mem_ctx);
   this->mem_ctx = NULL;
   /* Table entries point into mem_ctx, so no per-entry destructor. */
   _mesa_hash_table_destroy(this->consumer_inputs, NULL);
   this->consumer_inputs = NULL;
   _mesa_hash_table_destroy(this->consumer_interface_inputs, NULL);
   this->consumer_interface_inputs = NULL;
   glsl_type_singleton_decref();
}
TEST_F(link_varyings, single_simple_input)
{
   /* One plain (non-interface) input lands in consumer_inputs, keyed
    * by its name, and the interface table stays empty.
    */
   ir_variable *const input_var =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_in);
   ir.push_tail(input_var);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   hash_entry *const entry = _mesa_hash_table_search(consumer_inputs, "a");
   EXPECT_EQ((void *) input_var, entry->data);
   EXPECT_EQ(1u, consumer_inputs->entries);
   EXPECT_TRUE(consumer_interface_inputs->entries == 0);
}
TEST_F(link_varyings, gl_ClipDistance)
{
   /* A built-in with an explicit location is recorded in the
    * by-location array, not in either hash table.
    */
   const glsl_type *const float_array_8 =
      glsl_type::get_array_instance(glsl_type::vec(1), 8);
   ir_variable *const clipdistance =
      new(mem_ctx) ir_variable(float_array_8, "gl_ClipDistance",
                               ir_var_shader_in);
   clipdistance->data.explicit_location = true;
   clipdistance->data.location = VARYING_SLOT_CLIP_DIST0;
   clipdistance->data.explicit_index = 0;
   ir.push_tail(clipdistance);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   EXPECT_EQ(clipdistance, junk[VARYING_SLOT_CLIP_DIST0]);
   EXPECT_TRUE(consumer_inputs->entries == 0);
   EXPECT_TRUE(consumer_interface_inputs->entries == 0);
}
TEST_F(link_varyings, gl_CullDistance)
{
   /* Same as the gl_ClipDistance case: explicit-location built-ins go
    * into the by-location array only.
    */
   const glsl_type *const float_array_8 =
      glsl_type::get_array_instance(glsl_type::vec(1), 8);
   ir_variable *const culldistance =
      new(mem_ctx) ir_variable(float_array_8, "gl_CullDistance",
                               ir_var_shader_in);
   culldistance->data.explicit_location = true;
   culldistance->data.location = VARYING_SLOT_CULL_DIST0;
   culldistance->data.explicit_index = 0;
   ir.push_tail(culldistance);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   EXPECT_EQ(culldistance, junk[VARYING_SLOT_CULL_DIST0]);
   EXPECT_TRUE(consumer_inputs->entries == 0);
   EXPECT_TRUE(consumer_interface_inputs->entries == 0);
}
TEST_F(link_varyings, single_interface_input)
{
   /* An interface-block member is keyed "block.field" in the interface
    * table, and the plain-input table stays empty.
    */
   ir_variable *const member =
      new(mem_ctx) ir_variable(simple_interface->fields.structure[0].type,
                               simple_interface->fields.structure[0].name,
                               ir_var_shader_in);
   member->init_interface_type(simple_interface);
   ir.push_tail(member);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   char *const full_name = interface_field_name(simple_interface);
   hash_entry *const entry =
      _mesa_hash_table_search(consumer_interface_inputs, full_name);
   EXPECT_EQ((void *) member, entry->data);
   EXPECT_EQ(1u, consumer_interface_inputs->entries);
   EXPECT_TRUE(consumer_inputs->entries == 0);
}
TEST_F(link_varyings, one_interface_and_one_simple_input)
{
   /* Mixed inputs: each variable must land in its own table, one entry
    * apiece.
    */
   ir_variable *const plain =
      new(mem_ctx) ir_variable(glsl_type::vec(4), "a", ir_var_shader_in);
   ir.push_tail(plain);
   ir_variable *const iface =
      new(mem_ctx) ir_variable(simple_interface->fields.structure[0].type,
                               simple_interface->fields.structure[0].name,
                               ir_var_shader_in);
   iface->init_interface_type(simple_interface);
   ir.push_tail(iface);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   char *const iface_field_name = interface_field_name(simple_interface);
   hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs,
                                               iface_field_name);
   EXPECT_EQ((void *) iface, entry->data);
   EXPECT_EQ(1u, consumer_interface_inputs->entries);
   entry = _mesa_hash_table_search(consumer_inputs, "a");
   EXPECT_EQ((void *) plain, entry->data);
   EXPECT_EQ(1u, consumer_inputs->entries);
}
TEST_F(link_varyings, interface_field_doesnt_match_noninterface)
{
   char *const iface_field_name = interface_field_name(simple_interface);
   /* The input shader has a single input variable name "a.v"
    */
   ir_variable *const plain_input =
      new(mem_ctx) ir_variable(glsl_type::vec(4), iface_field_name,
                               ir_var_shader_in);
   ir.push_tail(plain_input);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   /* Create an output variable, "v", that is part of an interface block named
    * "a". They should not match.
    */
   ir_variable *const iface_output =
      new(mem_ctx) ir_variable(simple_interface->fields.structure[0].type,
                               simple_interface->fields.structure[0].name,
                               ir_var_shader_in);
   iface_output->init_interface_type(simple_interface);
   ir_variable *const match =
      linker::get_matching_input(mem_ctx, iface_output, consumer_inputs,
                                 consumer_interface_inputs, junk);
   EXPECT_EQ(NULL, match);
}
TEST_F(link_varyings, interface_field_doesnt_match_noninterface_vice_versa)
{
   char *const iface_field_name = interface_field_name(simple_interface);
   /* In input shader has a single variable, "v", that is part of an interface
    * block named "a".
    */
   ir_variable *const iface_input =
      new(mem_ctx) ir_variable(simple_interface->fields.structure[0].type,
                               simple_interface->fields.structure[0].name,
                               ir_var_shader_in);
   iface_input->init_interface_type(simple_interface);
   ir.push_tail(iface_input);
   linker::populate_consumer_input_sets(mem_ctx, &ir, consumer_inputs,
                                        consumer_interface_inputs, junk);
   /* Create an output variable "a.v". They should not match.
    */
   ir_variable *const plain_output =
      new(mem_ctx) ir_variable(glsl_type::vec(4), iface_field_name,
                               ir_var_shader_out);
   ir_variable *const match =
      linker::get_matching_input(mem_ctx, plain_output, consumer_inputs,
                                 consumer_interface_inputs, junk);
   EXPECT_EQ(NULL, match);
}

View file

@ -266,8 +266,6 @@ struct gl_linked_shader
unsigned num_combined_uniform_components;
struct exec_list *ir;
struct exec_list *packed_varyings;
struct exec_list *fragdata_arrays;
struct glsl_symbol_table *symbols;
/**