nir/intrinsics: Add a second const index to load_uniform

In the i965 backend, we want to be able to "pull apart" the uniforms and
push some of them into the shader through a different path.  In order to do
this effectively, we need to know which variable is actually being referred
to by a given uniform load.  Previously, the location was completely flattened
by nir_lower_io, which made this difficult.  This adds a second constant index
to the intrinsic to make that information available to the back-end.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Jason Ekstrand 2015-08-18 11:18:55 -07:00
parent 6c33d6bbf9
commit 0db8e87b4a
3 changed files with 19 additions and 13 deletions
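
To make the motivation concrete, here is a minimal sketch in plain C — not taken from Mesa — of how a back-end could use the two indices: const_index[0] identifies the variable (its base), so a per-variable push-versus-pull decision is possible, while const_index[1] addresses the element within it. The push_entry table and resolve_uniform helper are invented purely for illustration.

   #include <stdbool.h>

   /* Illustrative only: one entry per uniform variable, keyed by its base. */
   struct push_entry {
      bool pushed;          /* was this variable chosen for the push path? */
      unsigned push_start;  /* where its data starts in the pushed payload */
   };

   /* const_index[0] (base) picks the variable; const_index[1] (offset) picks
    * the element inside it, so array accesses no longer hide which variable
    * is being read. */
   static unsigned
   resolve_uniform(const struct push_entry *push_table,
                   unsigned base, unsigned offset, bool *out_pushed)
   {
      const struct push_entry *e = &push_table[base];
      *out_pushed = e->pushed;
      return e->pushed ? e->push_start + offset  /* location in the payload */
                       : base + offset;          /* flattened pull address  */
   }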

src/glsl/nir/nir_intrinsics.h

@@ -141,11 +141,17 @@ SYSTEM_VALUE(sample_mask_in, 1)
 SYSTEM_VALUE(invocation_id, 1)
 
 /*
- * The first and only index is the base address to load from. Indirect
- * loads have an additional register input, which is added to the constant
- * address to compute the final address to load from. For UBO's (and
- * SSBO's), the first source is the (possibly constant) UBO buffer index
- * and the indirect (if it exists) is the second source.
+ * The format of the indices depends on the type of the load. For uniforms,
+ * the first index is the base address and the second index is an offset that
+ * should be added to the base address. (This way you can determine in the
+ * back-end which variable is being accessed even in an array.) For inputs,
+ * the one and only index corresponds to the attribute slot. UBO loads also
+ * have a single index which is the base address to load from.
+ *
+ * UBO loads have a (possibly constant) source which is the UBO buffer index.
+ * For each type of load, the _indirect variant has one additional source
+ * (the second in the case of UBO's) that is an indirect offset to be added to
+ * the constant address or base offset to compute the final offset.
  *
  * For vector backends, the address is in terms of one vec4, and so each array
  * element is +4 scalar components from the previous array element. For scalar
@@ -153,14 +159,14 @@ SYSTEM_VALUE(invocation_id, 1)
  * elements begin immediately after the previous array element.
  */
 
-#define LOAD(name, extra_srcs, flags) \
-   INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 1, flags) \
+#define LOAD(name, extra_srcs, indices, flags) \
+   INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, indices, flags) \
    INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
-             true, 0, 0, 1, flags)
+             true, 0, 0, indices, flags)
 
-LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(ubo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(input, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 /* LOAD(ssbo, 1, 0) */
 
 /*
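
As a reading aid for the hunk above: the new indices argument feeds the index-count slot of the INTRINSIC() macro (the second-to-last argument in that era's signature), so the uniform entry now declares two constant indices. Written out by hand, the expansion is simply:

   /* LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
    * expands to: */
   INTRINSIC(load_uniform, 0, ARR(1), true, 0, 0, 2,
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
   INTRINSIC(load_uniform_indirect, 0 + 1, ARR(1, 1), true, 0, 0, 2,
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)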

src/mesa/drivers/dri/i965/brw_fs_nir.cpp

@@ -1504,7 +1504,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       has_indirect = true;
       /* fallthrough */
    case nir_intrinsic_load_uniform: {
-      unsigned index = instr->const_index[0];
+      unsigned index = instr->const_index[0] + instr->const_index[1];
       fs_reg uniform_reg;
       if (index < num_direct_uniforms) {

src/mesa/drivers/dri/i965/brw_vec4_nir.cpp

@@ -570,7 +570,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       has_indirect = true;
       /* fallthrough */
    case nir_intrinsic_load_uniform: {
-      int uniform = instr->const_index[0];
+      int uniform = instr->const_index[0] + instr->const_index[1];
       dest = get_nir_dest(instr->dest);
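
Both back-end hunks simply collapse the two constant indices back into the flattened offset they used before, so the existing code paths keep working; the _indirect variants then add their extra source on top, as described in the nir_intrinsics.h comment. A standalone restatement of that addressing model, with a made-up function name and assuming nothing beyond that comment:

   /* final offset = constant base + constant offset + indirect source
    * (pass 0 for the non-_indirect variants, which have no such source) */
   static unsigned
   uniform_final_offset(unsigned base,          /* const_index[0] */
                        unsigned const_offset,  /* const_index[1] */
                        unsigned indirect)      /* extra source, or 0 */
   {
      return base + const_offset + indirect;
   }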