glsl: Add image built-in function generator.

Because of the combinatorial explosion of different image built-ins
with different image dimensionalities and base data types, enumerating
all the 242 possibilities would be annoying and a waste of .text
space.  Instead use a special path in the built-in builder that loops
over all the known image types.
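
For illustration, each built-in needs one overload per image type, so
the GLSL-level result includes signatures such as (a small sample
only):

     vec4 imageLoad( image2D image, ivec2 coord);
    ivec4 imageLoad(iimage2D image, ivec2 coord);
    uvec4 imageLoad(uimage2D image, ivec2 coord);
    /* ...and likewise for the 1D/3D/Cube/Rect/Buffer/Array/MS variants
     * of imageLoad, imageStore and the imageAtomic* built-ins. */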

v2: Generate built-ins on GLSL version 4.20 too.  Rename
    '_has_float_data_type' to '_supports_float_data_type'.  Avoid
    duplicating enumeration of image built-ins in create_intrinsics()
    and create_builtins().
v3: Use a more orthodox approach for passing image built-in generator
    parameters.
v4: Cosmetic changes.

Acked-by: Paul Berry <stereotype441@gmail.com>
Francisco Jerez 2013-11-25 20:29:57 -08:00
parent 87acc7c650
commit e8dbe430aa

@@ -340,6 +340,13 @@ shader_trinary_minmax(const _mesa_glsl_parse_state *state)
   return state->AMD_shader_trinary_minmax_enable;
}

static bool
shader_image_load_store(const _mesa_glsl_parse_state *state)
{
   return (state->is_version(420, 0) ||
           state->ARB_shader_image_load_store_enable);
}
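
/* A quick note on availability (illustrative GLSL; the image
 * declaration below is only an example): either of these preambles
 * satisfies the predicate above.
 *
 *    #version 420
 *    layout(rgba32f) uniform readonly image2D img;
 *
 *    // or, on older GLSL versions:
 *    #version 150
 *    #extension GL_ARB_shader_image_load_store : require
 *    layout(rgba32f) uniform readonly image2D img;
 */
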
/** @} */
/******************************************************************************/
@@ -413,6 +420,33 @@ private:
   /** Create a new function and add the given signatures. */
   void add_function(const char *name, ...);

   enum image_function_flags {
      IMAGE_FUNCTION_EMIT_STUB = (1 << 0),
      IMAGE_FUNCTION_RETURNS_VOID = (1 << 1),
      IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE = (1 << 2),
      IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE = (1 << 3),
      IMAGE_FUNCTION_READ_ONLY = (1 << 4),
      IMAGE_FUNCTION_WRITE_ONLY = (1 << 5)
   };

   /**
    * Create a new image built-in function for all known image types.
    * \p flags is a bitfield of \c image_function_flags flags.
    */
   void add_image_function(const char *name,
                           const char *intrinsic_name,
                           unsigned num_arguments,
                           unsigned flags);

   /**
    * Create new functions for all known image built-ins and types.
    * If \p glsl is \c true, use the GLSL built-in names and emit code
    * to call into the actual compiler intrinsic.  If \p glsl is
    * false, emit a function prototype with no body for each image
    * intrinsic name.
    */
   void add_image_functions(bool glsl);

   ir_function_signature *new_sig(const glsl_type *return_type,
                                  builtin_available_predicate avail,
                                  int num_params, ...);
@@ -580,6 +614,20 @@ private:
   B1(max3)
   B1(mid3)

   ir_function_signature *_image_prototype(const glsl_type *image_type,
                                           const char *intrinsic_name,
                                           unsigned num_arguments,
                                           unsigned flags);
   ir_function_signature *_image(const glsl_type *image_type,
                                 const char *intrinsic_name,
                                 unsigned num_arguments,
                                 unsigned flags);
   ir_function_signature *_memory_barrier_intrinsic(
      builtin_available_predicate avail);
   ir_function_signature *_memory_barrier(
      builtin_available_predicate avail);

#undef B0
#undef B1
#undef B2
@@ -694,6 +742,12 @@ builtin_builder::create_intrinsics()
   add_function("__intrinsic_atomic_predecrement",
                _atomic_intrinsic(shader_atomic_counters),
                NULL);

   add_image_functions(false);

   add_function("__intrinsic_memory_barrier",
                _memory_barrier_intrinsic(shader_image_load_store),
                NULL);
}

/**
@@ -2167,6 +2221,12 @@ builtin_builder::create_builtins()
                _mid3(glsl_type::uvec4_type),
                NULL);

   add_image_functions(true);

   add_function("memoryBarrier",
                _memory_barrier(shader_image_load_store),
                NULL);

#undef F
#undef FI
#undef FIU
@@ -2200,6 +2260,104 @@ builtin_builder::add_function(const char *name, ...)
   shader->symbols->add_function(f);
}

void
builtin_builder::add_image_function(const char *name,
                                    const char *intrinsic_name,
                                    unsigned num_arguments,
                                    unsigned flags)
{
   static const glsl_type *const types[] = {
      glsl_type::image1D_type,
      glsl_type::image2D_type,
      glsl_type::image3D_type,
      glsl_type::image2DRect_type,
      glsl_type::imageCube_type,
      glsl_type::imageBuffer_type,
      glsl_type::image1DArray_type,
      glsl_type::image2DArray_type,
      glsl_type::imageCubeArray_type,
      glsl_type::image2DMS_type,
      glsl_type::image2DMSArray_type,
      glsl_type::iimage1D_type,
      glsl_type::iimage2D_type,
      glsl_type::iimage3D_type,
      glsl_type::iimage2DRect_type,
      glsl_type::iimageCube_type,
      glsl_type::iimageBuffer_type,
      glsl_type::iimage1DArray_type,
      glsl_type::iimage2DArray_type,
      glsl_type::iimageCubeArray_type,
      glsl_type::iimage2DMS_type,
      glsl_type::iimage2DMSArray_type,
      glsl_type::uimage1D_type,
      glsl_type::uimage2D_type,
      glsl_type::uimage3D_type,
      glsl_type::uimage2DRect_type,
      glsl_type::uimageCube_type,
      glsl_type::uimageBuffer_type,
      glsl_type::uimage1DArray_type,
      glsl_type::uimage2DArray_type,
      glsl_type::uimageCubeArray_type,
      glsl_type::uimage2DMS_type,
      glsl_type::uimage2DMSArray_type
   };

   ir_function *f = new(mem_ctx) ir_function(name);

   for (unsigned i = 0; i < Elements(types); ++i) {
      if (types[i]->sampler_type != GLSL_TYPE_FLOAT ||
          (flags & IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE))
         f->add_signature(_image(types[i], intrinsic_name,
                                 num_arguments, flags));
   }

   shader->symbols->add_function(f);
}
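
/* For example, the atomic built-ins are registered without
 * IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE, so the loop above skips the
 * floating-point image types and only overloads like these get created
 * (illustrative GLSL prototypes):
 *
 *    int  imageAtomicAdd(iimage2D image, ivec2 coord, int  data);
 *    uint imageAtomicAdd(uimage2D image, ivec2 coord, uint data);
 *
 * ...and so on for the other integer image types, with no overload
 * taking a floating-point image2D.
 */
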
void
builtin_builder::add_image_functions(bool glsl)
{
   const unsigned flags = (glsl ? IMAGE_FUNCTION_EMIT_STUB : 0);

   add_image_function(glsl ? "imageLoad" : "__intrinsic_image_load",
                      "__intrinsic_image_load", 0,
                      (flags | IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE |
                       IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE |
                       IMAGE_FUNCTION_READ_ONLY));

   add_image_function(glsl ? "imageStore" : "__intrinsic_image_store",
                      "__intrinsic_image_store", 1,
                      (flags | IMAGE_FUNCTION_RETURNS_VOID |
                       IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE |
                       IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE |
                       IMAGE_FUNCTION_WRITE_ONLY));

   add_image_function(glsl ? "imageAtomicAdd" : "__intrinsic_image_atomic_add",
                      "__intrinsic_image_atomic_add", 1, flags);

   add_image_function(glsl ? "imageAtomicMin" : "__intrinsic_image_atomic_min",
                      "__intrinsic_image_atomic_min", 1, flags);

   add_image_function(glsl ? "imageAtomicMax" : "__intrinsic_image_atomic_max",
                      "__intrinsic_image_atomic_max", 1, flags);

   add_image_function(glsl ? "imageAtomicAnd" : "__intrinsic_image_atomic_and",
                      "__intrinsic_image_atomic_and", 1, flags);

   add_image_function(glsl ? "imageAtomicOr" : "__intrinsic_image_atomic_or",
                      "__intrinsic_image_atomic_or", 1, flags);

   add_image_function(glsl ? "imageAtomicXor" : "__intrinsic_image_atomic_xor",
                      "__intrinsic_image_atomic_xor", 1, flags);

   add_image_function((glsl ? "imageAtomicExchange" :
                       "__intrinsic_image_atomic_exchange"),
                      "__intrinsic_image_atomic_exchange", 1, flags);

   add_image_function((glsl ? "imageAtomicCompSwap" :
                       "__intrinsic_image_atomic_comp_swap"),
                      "__intrinsic_image_atomic_comp_swap", 2, flags);
}

ir_variable *
builtin_builder::in_var(const glsl_type *type, const char *name)
{
@@ -4094,6 +4252,99 @@ builtin_builder::_mid3(const glsl_type *type)
   return sig;
}

ir_function_signature *
builtin_builder::_image_prototype(const glsl_type *image_type,
                                  const char *intrinsic_name,
                                  unsigned num_arguments,
                                  unsigned flags)
{
   const glsl_type *data_type = glsl_type::get_instance(
      image_type->sampler_type,
      (flags & IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE ? 4 : 1),
      1);
   const glsl_type *ret_type = (flags & IMAGE_FUNCTION_RETURNS_VOID ?
                                glsl_type::void_type : data_type);

   /* Addressing arguments that are always present. */
   ir_variable *image = in_var(image_type, "image");
   ir_variable *coord = in_var(
      glsl_type::ivec(image_type->coordinate_components()), "coord");

   ir_function_signature *sig = new_sig(
      ret_type, shader_image_load_store, 2, image, coord);

   /* Sample index for multisample images. */
   if (image_type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS)
      sig->parameters.push_tail(in_var(glsl_type::int_type, "sample"));

   /* Data arguments. */
   for (unsigned i = 0; i < num_arguments; ++i)
      sig->parameters.push_tail(in_var(data_type,
                                       ralloc_asprintf(NULL, "arg%d", i)));

   /* Set the maximal set of qualifiers allowed for this image
    * built-in.  Function calls with arguments having fewer
    * qualifiers than present in the prototype are allowed by the
    * spec, but not with more, i.e. this will make the compiler
    * accept everything that needs to be accepted, and reject cases
    * like loads from write-only or stores to read-only images.
    */
   image->data.image.read_only = flags & IMAGE_FUNCTION_READ_ONLY;
   image->data.image.write_only = flags & IMAGE_FUNCTION_WRITE_ONLY;
   image->data.image.coherent = true;
   image->data.image._volatile = true;
   image->data.image._restrict = true;

   return sig;
}
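
/* To make the comment above concrete, a minimal GLSL sketch of what
 * the "maximal" parameter qualifiers accept and reject (the variable
 * names are made up for illustration):
 *
 *    layout(rgba32f) coherent uniform image2D img;
 *    writeonly uniform image2D wo_img;
 *
 *    void f()
 *    {
 *       vec4 v = imageLoad(img, ivec2(0));    // OK: argument has fewer
 *                                             // qualifiers than the
 *                                             // formal parameter
 *       imageStore(wo_img, ivec2(0), v);      // OK: imageStore's image
 *                                             // parameter is write-only
 *       vec4 w = imageLoad(wo_img, ivec2(0)); // rejected: write-only
 *                                             // argument, but imageLoad's
 *                                             // parameter is read-only
 *    }
 */
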
ir_function_signature *
builtin_builder::_image(const glsl_type *image_type,
                        const char *intrinsic_name,
                        unsigned num_arguments,
                        unsigned flags)
{
   ir_function_signature *sig = _image_prototype(image_type, intrinsic_name,
                                                 num_arguments, flags);

   if (flags & IMAGE_FUNCTION_EMIT_STUB) {
      ir_factory body(&sig->body, mem_ctx);
      ir_function *f = shader->symbols->get_function(intrinsic_name);

      if (flags & IMAGE_FUNCTION_RETURNS_VOID) {
         body.emit(call(f, NULL, sig->parameters));
      } else {
         ir_variable *ret_val =
            body.make_temp(sig->return_type, "_ret_val");
         body.emit(call(f, ret_val, sig->parameters));
         body.emit(ret(ret_val));
      }

      sig->is_defined = true;

   } else {
      sig->is_intrinsic = true;
   }

   return sig;
}
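
/* When IMAGE_FUNCTION_EMIT_STUB is set, this builds roughly the IR
 * equivalent of the following GLSL for each overload (shown as GLSL
 * source for clarity; the builder emits the IR directly rather than
 * parsing any text):
 *
 *    vec4 imageLoad(readonly coherent volatile restrict image2D image,
 *                   ivec2 coord)
 *    {
 *       vec4 _ret_val = __intrinsic_image_load(image, coord);
 *       return _ret_val;
 *    }
 */
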
ir_function_signature *
builtin_builder::_memory_barrier_intrinsic(builtin_available_predicate avail)
{
   MAKE_INTRINSIC(glsl_type::void_type, avail, 0);
   return sig;
}

ir_function_signature *
builtin_builder::_memory_barrier(builtin_available_predicate avail)
{
   MAKE_SIG(glsl_type::void_type, avail, 0);
   body.emit(call(shader->symbols->get_function("__intrinsic_memory_barrier"),
                  NULL, sig->parameters));
   return sig;
}

/** @} */
/******************************************************************************/