mesa/st: only do scalar lowerings if driver benefits

As not every (upcoming) backend compiler is happy with the
nir_lower_xxx_to_scalar lowerings, only run them if the backend
is scalar (and not vec4) based.

Signed-off-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Christian Gmeiner 2018-06-15 12:29:59 +02:00
parent f485e5671c
commit 8b3099353e
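
Whether a stage counts as scalar is taken from PIPE_SHADER_CAP_SCALAR_ISA, which the
state tracker queries through the screen's get_shader_param hook, as the diff below shows.
For context, a rough sketch of how a scalar backend might advertise the cap; the driver
and function names here are made up for illustration and are not part of this change:

/* Sketch only: a hypothetical scalar-ISA gallium driver opting in to the
 * nir_lower_*_to_scalar lowerings run by st_nir_opts(). */
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

static int
example_screen_get_shader_param(struct pipe_screen *pscreen,
                                enum pipe_shader_type shader,
                                enum pipe_shader_cap param)
{
   switch (param) {
   case PIPE_SHADER_CAP_SCALAR_ISA:
      return 1;   /* backend is scalar, so scalar lowerings help */
   /* ... other shader caps ... */
   default:
      return 0;
   }
}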


@@ -317,15 +317,18 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
 }

 void
-st_nir_opts(nir_shader *nir)
+st_nir_opts(nir_shader *nir, bool scalar)
 {
    bool progress;
    do {
       progress = false;

       NIR_PASS_V(nir, nir_lower_vars_to_ssa);
-      NIR_PASS_V(nir, nir_lower_alu_to_scalar);
-      NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+
+      if (scalar) {
+         NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+      }

       NIR_PASS_V(nir, nir_lower_alu);
       NIR_PASS_V(nir, nir_lower_pack);
@@ -364,6 +367,9 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
 {
    const nir_shader_compiler_options *options =
       st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
+   enum pipe_shader_type type = pipe_shader_type_from_mesa(stage);
+   struct pipe_screen *screen = st->pipe->screen;
+   bool is_scalar = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
    assert(options);

    if (prog->nir)
@@ -406,7 +412,7 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);

-   st_nir_opts(nir);
+   st_nir_opts(nir, is_scalar);

    return nir;
 }
@@ -588,7 +594,7 @@ st_nir_get_mesa_program(struct gl_context *ctx,
 }

 static void
-st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
+st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
 {
    nir_lower_io_arrays_to_elements(*producer, *consumer);
@@ -615,8 +621,8 @@ st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
       NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
       NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);

-      st_nir_opts(*producer);
-      st_nir_opts(*consumer);
+      st_nir_opts(*producer, scalar);
+      st_nir_opts(*consumer, scalar);
    }
 }
@@ -627,6 +633,20 @@ st_link_nir(struct gl_context *ctx,
             struct gl_shader_program *shader_program)
 {
    struct st_context *st = st_context(ctx);
+   struct pipe_screen *screen = st->pipe->screen;
+   bool is_scalar[MESA_SHADER_STAGES];
+
+   /* Determine scalar property of each shader stage */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+      enum pipe_shader_type type;
+
+      if (shader == NULL)
+         continue;
+
+      type = pipe_shader_type_from_mesa(shader->Stage);
+      is_scalar[i] = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
+   }

    /* Determine first and last stage. */
    unsigned first = MESA_SHADER_STAGES;
@@ -655,7 +675,7 @@ st_link_nir(struct gl_context *ctx,
       nir_shader *nir = shader->Program->nir;
       NIR_PASS_V(nir, nir_lower_io_to_scalar_early, mask);
-      st_nir_opts(nir);
+      st_nir_opts(nir, is_scalar[i]);
    }

    /* Linking the stages in the opposite order (from fragment to vertex)
@@ -670,7 +690,8 @@ st_link_nir(struct gl_context *ctx,
          continue;

       st_nir_link_shaders(&shader->Program->nir,
-                          &shader_program->_LinkedShaders[next]->Program->nir);
+                          &shader_program->_LinkedShaders[next]->Program->nir,
+                          is_scalar[i]);
       next = i;
    }