vc4: Start adding a NIR-based output lowering pass.

For now, this just splits store_output intrinsics into scalars, and
drops unused outputs in the coordinate shader.  My goal is to be able to
drop a bunch of my VC4-specific optimizations by letting NIR handle them.
Eric Anholt  2015-07-29 15:52:18 -07:00
parent  c93ffd661a
commit  b85f6ae4b2

4 changed files with 137 additions and 7 deletions
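For illustration (this sketch is not part of the commit, and the pseudo-NIR
below is rough shorthand, not literal nir_print output): the lowering turns
one vec4 store_output at driver_location N into four scalar stores whose
const_index[0] is the pre-multiplied slot N*4 + component:

    store_output v, N          # before: one vec4 write at slot N

    store_output v.x, N*4 + 0  # after: four scalar writes
    store_output v.y, N*4 + 1
    store_output v.z, N*4 + 2
    store_output v.w, N*4 + 3

In a coordinate shader, a store to any output other than
TGSI_SEMANTIC_POSITION or TGSI_SEMANTIC_PSIZE is deleted outright instead.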

src/gallium/drivers/vc4/Makefile.sources

@@ -19,6 +19,7 @@ C_SOURCES := \
 	vc4_fence.c \
 	vc4_formats.c \
 	vc4_job.c \
+	vc4_nir_lower_io.c \
 	vc4_opt_algebraic.c \
 	vc4_opt_constant_folding.c \
 	vc4_opt_copy_propagation.c \

src/gallium/drivers/vc4/vc4_nir_lower_io.c (new file)

@@ -0,0 +1,130 @@
/*
* Copyright © 2015 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "vc4_qir.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"
/**
* Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
* something amenable to the VC4 architecture.
*
* Currently, it split outputs into scalars, and drops any non-position values
* in coordinate shaders.
*/
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        /* Find the output variable this store refers to by its
         * driver_location.
         */
        nir_variable *output_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        unsigned semantic_name = output_var->data.location;

        /* Coordinate shaders only need position and point size; drop any
         * other output stores entirely.
         */
        if (c->stage == QSTAGE_COORD &&
            (semantic_name != TGSI_SEMANTIC_POSITION &&
             semantic_name != TGSI_SEMANTIC_PSIZE)) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Emit one scalar store per component, pre-multiplying the output
         * slot by 4 so each scalar gets its own slot.
         */
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_swizzle(b, intr->src[0].ssa,
                                                    &i, 1, false));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}
static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);
        nir_metadata_preserve(impl, nir_metadata_block_index |
                                    nir_metadata_dominance);
        return true;
}

void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}
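A quick way to sanity-check the pass output (a sketch, not part of the commit:
nir_print_shader() is NIR's standard shader dumper, but the VC4_DEBUG_NIR flag
gating it here is hypothetical):

    vc4_nir_lower_io(c);
    if (vc4_debug & VC4_DEBUG_NIR)          /* hypothetical debug flag */
            nir_print_shader(c->s, stderr);

After the pass, every store_output in the dump has num_components == 1, which
is exactly what the vc4_program.c hunk below begins asserting.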

src/gallium/drivers/vc4/vc4_program.c

@@ -1895,13 +1895,10 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                 break;
         case nir_intrinsic_store_output:
-                for (int i = 0; i < instr->num_components; i++) {
-                        c->outputs[instr->const_index[0] * 4 + i] =
-                                qir_MOV(c, ntq_get_src(c, instr->src[0], i));
-                }
-                c->num_outputs = MAX2(c->num_outputs,
-                                      instr->const_index[0] * 4 +
-                                      instr->num_components + 1);
+                assert(instr->num_components == 1);
+                c->outputs[instr->const_index[0]] =
+                        qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
+                c->num_outputs = MAX2(c->num_outputs, instr->const_index[0] + 1);
                 break;
         case nir_intrinsic_discard:
@@ -2102,6 +2099,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
         c->s = tgsi_to_nir(tokens, &nir_options);
         nir_opt_global_to_local(c->s);
         nir_convert_to_ssa(c->s);
+        vc4_nir_lower_io(c);
         nir_lower_idiv(c->s);
         vc4_optimize_nir(c->s);

src/gallium/drivers/vc4/vc4_qir.h

@@ -409,6 +409,7 @@ bool qir_opt_cse(struct vc4_compile *c);
 bool qir_opt_dead_code(struct vc4_compile *c);
 bool qir_opt_small_immediates(struct vc4_compile *c);
 bool qir_opt_vpm_writes(struct vc4_compile *c);
+void vc4_nir_lower_io(struct vc4_compile *c);
 void qir_lower_uniforms(struct vc4_compile *c);
 void qpu_schedule_instructions(struct vc4_compile *c);