2014-07-22 14:05:06 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*
|
|
|
|
|
* Authors:
|
|
|
|
|
* Connor Abbott (cwabbott0@gmail.com)
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include "nir.h"
|
2015-01-16 16:14:51 -08:00
|
|
|
#include <stdlib.h>
|
2014-07-22 14:05:06 -07:00
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Implements the classic to-SSA algorithm described by Cytron et. al. in
|
|
|
|
|
* "Efficiently Computing Static Single Assignment Form and the Control
|
|
|
|
|
* Dependence Graph."
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* inserts a phi node of the form reg = phi(reg, reg, reg, ...) */
static void
insert_trivial_phi(nir_register *reg, nir_block *block, void *mem_ctx)
{
   nir_phi_instr *instr = nir_phi_instr_create(mem_ctx);

   /* The destination is still the register itself; it is rewritten into an
    * SSA destination later, by the renaming pass (rewrite_def_forwards).
    */
   instr->dest.reg.reg = reg;
   struct set_entry *entry;
   set_foreach(block->predecessors, entry) {
      nir_block *pred = (nir_block *) entry->key;

      /* One source per predecessor, each initially reading the same
       * register; sources are likewise rewritten during renaming.
       */
      nir_phi_src *src = ralloc(instr, nir_phi_src);
      src->pred = pred;
      src->src.is_ssa = false;
      src->src.reg.base_offset = 0;
      src->src.reg.indirect = NULL;
      src->src.reg.reg = reg;
      exec_list_push_tail(&instr->srcs, &src->node);
   }

   /* Phis must be grouped at the top of the block, before any other
    * instruction.
    */
   nir_instr_insert_before_block(block, &instr->instr);
}
|
|
|
|
|
|
|
|
|
|
/* Phi-placement phase of the Cytron et al. algorithm: for every scalar
 * (non-array) register, insert trivial phi nodes at the iterated dominance
 * frontier of the set of blocks that define it.
 */
static void
insert_phi_nodes(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);

   /* Per-block counters, compared against iter_count so the arrays do not
    * need to be cleared between registers; an entry < iter_count means
    * "not yet seen for the current register".
    * NOTE(review): calloc/malloc results are not checked here — matches
    * the surrounding code's convention, but an OOM would crash.
    */
   unsigned *work = calloc(impl->num_blocks, sizeof(unsigned));
   unsigned *has_already = calloc(impl->num_blocks, sizeof(unsigned));

   /*
    * Since the work flags already prevent us from inserting a node that has
    * ever been inserted into W, we don't need to use a set to represent W.
    * Also, since no block can ever be inserted into W more than once, we know
    * that the maximum size of W is the number of basic blocks in the
    * function. So all we need to handle W is an array and a pointer to the
    * next element to be inserted and the next element to be removed.
    */
   nir_block **W = malloc(impl->num_blocks * sizeof(nir_block *));
   unsigned w_start, w_end;

   unsigned iter_count = 0;

   nir_index_blocks(impl);

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      /* Registers with array elements (indirect access) cannot be
       * converted to SSA; skip them.
       */
      if (reg->num_array_elems != 0)
         continue;

      /* Reset the worklist; bumping iter_count implicitly clears the
       * work/has_already flags for this register.
       */
      w_start = w_end = 0;
      iter_count++;

      /* Seed W with every block containing a definition of reg. */
      nir_foreach_def(reg, dest) {
         nir_instr *def = dest->reg.parent_instr;
         if (work[def->block->index] < iter_count)
            W[w_end++] = def->block;
         work[def->block->index] = iter_count;
      }

      while (w_start != w_end) {
         nir_block *cur = W[w_start++];
         struct set_entry *entry;
         set_foreach(cur->dom_frontier, entry) {
            nir_block *next = (nir_block *) entry->key;

            /*
             * If there's more than one return statement, then the end block
             * can be a join point for some definitions. However, there are
             * no instructions in the end block, so nothing would use those
             * phi nodes. Of course, we couldn't place those phi nodes
             * anyways due to the restriction of having no instructions in the
             * end block...
             */
            if (next == impl->end_block)
               continue;

            if (has_already[next->index] < iter_count) {
               insert_trivial_phi(reg, next, mem_ctx);
               has_already[next->index] = iter_count;
               /* The phi is itself a new definition of reg, so its block
                * joins the worklist (iterated dominance frontier).
                */
               if (work[next->index] < iter_count) {
                  work[next->index] = iter_count;
                  W[w_end++] = next;
               }
            }
         }
      }
   }

   free(work);
   free(has_already);
   free(W);
}
|
|
|
|
|
|
|
|
|
|
/* Per-register renaming state: the classic stack of reaching SSA
 * definitions used while walking the dominance tree.
 */
typedef struct {
   nir_ssa_def **stack;    /* stack of live SSA defs; NULL if the register
                            * is not being converted */
   int index;              /* top of stack; -1 means no definition reaches
                            * here (register is undefined) */
   unsigned num_defs; /** < used to add indices to debug names */
#ifndef NDEBUG
   unsigned stack_size;    /* capacity of stack, for overflow asserts only */
#endif
} reg_state;
|
|
|
|
|
|
|
|
|
|
/* Shared state threaded through the renaming (rewrite) pass. */
typedef struct {
   reg_state *states;         /* indexed by nir_register::index */
   void *mem_ctx;             /* ralloc context for new instructions/names */
   nir_instr *parent_instr;   /* instruction whose srcs/dests are being
                               * rewritten; NULL when rewriting an if
                               * condition (see parent_if) */
   nir_if *parent_if;         /* the if whose condition is being rewritten,
                               * used only when parent_instr is NULL */
   nir_function_impl *impl;

   /* map from SSA value -> original register */
   struct hash_table *ssa_map;
} rewrite_state;
|
|
|
|
|
|
|
|
|
|
/* Returns the SSA definition of reg that reaches the current program
 * point, i.e. the top of reg's renaming stack. If nothing reaches here
 * (the register is read before any write), a fresh ssa_undef instruction
 * is created instead so the "undefined" information is preserved.
 */
static nir_ssa_def *get_ssa_src(nir_register *reg, rewrite_state *state)
{
   reg_state *rs = &state->states[reg->index];

   if (rs->index != -1)
      return rs->stack[rs->index];

   /*
    * We're using an undefined register, create a new undefined SSA value
    * to preserve the information that this source is undefined
    */
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(state->mem_ctx, reg->num_components);

   /*
    * We could just insert the undefined instruction before the instruction
    * we're rewriting, but we could be rewriting a phi source in which case
    * we can't do that, so do the next easiest thing - insert it at the
    * beginning of the program. In the end, it doesn't really matter where
    * the undefined instructions are because they're going to be ignored
    * in the backend.
    */
   nir_instr_insert_before_cf_list(&state->impl->body, &undef->instr);
   return &undef->def;
}
|
|
|
|
|
|
|
|
|
|
/* nir_foreach_src callback: rewrites a single register source into the
 * reaching SSA value. Returns true always so iteration continues.
 *
 * state->parent_instr identifies the instruction owning the source; when
 * it is NULL, state->parent_if owns the source (an if condition).
 */
static bool
rewrite_use(nir_src *src, void *_state)
{
   rewrite_state *state = (rewrite_state *) _state;

   /* Already SSA — nothing to do. */
   if (src->is_ssa)
      return true;

   unsigned index = src->reg.reg->index;

   /* A NULL stack means this register was not selected for SSA
    * conversion (e.g. array registers); leave the source alone.
    */
   if (state->states[index].stack == NULL)
      return true;

   nir_ssa_def *def = get_ssa_src(src->reg.reg, state);
   /* Use the rewrite helpers rather than assigning directly so the
    * use/def lists stay consistent.
    */
   if (state->parent_instr)
      nir_instr_rewrite_src(state->parent_instr, src, nir_src_for_ssa(def));
   else
      nir_if_rewrite_condition(state->parent_if, nir_src_for_ssa(def));

   return true;
}
|
|
|
|
|
|
|
|
|
|
/* nir_foreach_dest callback: converts a register destination into a fresh
 * SSA destination, pushes it on the register's renaming stack, and records
 * the SSA value -> register mapping so the stack can be popped again on
 * the way back up the dominance tree (rewrite_def_backwards).
 */
static bool
rewrite_def_forwards(nir_dest *dest, void *_state)
{
   rewrite_state *state = (rewrite_state *) _state;

   if (dest->is_ssa)
      return true;

   nir_register *reg = dest->reg.reg;
   unsigned index = reg->index;

   /* Register not selected for conversion — keep the register dest. */
   if (state->states[index].stack == NULL)
      return true;

   /* Derive a debug name like "foo_3" from the register name plus a
    * running per-register definition counter.
    */
   char *name = NULL;
   if (dest->reg.reg->name)
      name = ralloc_asprintf(state->mem_ctx, "%s_%u", dest->reg.reg->name,
                             state->states[index].num_defs);

   /* Unlink from the register's def list before re-initializing the dest
    * as SSA, so the def list is not corrupted.
    */
   list_del(&dest->reg.def_link);
   nir_ssa_dest_init(state->parent_instr, dest, reg->num_components, name);

   /* push our SSA destination on the stack */
   state->states[index].index++;
   assert(state->states[index].index < state->states[index].stack_size);
   state->states[index].stack[state->states[index].index] = &dest->ssa;
   state->states[index].num_defs++;

   /* Remember which register this SSA value came from so the backward
    * pass can pop the right stack.
    */
   _mesa_hash_table_insert(state->ssa_map, &dest->ssa, reg);

   return true;
}
|
|
|
|
|
|
|
|
|
|
/* Forward-rewrites an ALU instruction. ALU instructions need special
 * handling because of the write mask: a partial write of a register must
 * become a full SSA definition, so the untouched channels are merged back
 * in from the previous reaching definition with an extra vecN instruction.
 */
static void
rewrite_alu_instr_forward(nir_alu_instr *instr, rewrite_state *state)
{
   state->parent_instr = &instr->instr;

   nir_foreach_src(&instr->instr, rewrite_use, state);

   if (instr->dest.dest.is_ssa)
      return;

   nir_register *reg = instr->dest.dest.reg.reg;
   unsigned index = reg->index;

   if (state->states[index].stack == NULL)
      return;

   unsigned write_mask = instr->dest.write_mask;
   if (write_mask != (1 << instr->dest.dest.reg.reg->num_components) - 1) {
      /* Partial write: the instruction only defines some channels. */

      /*
       * Calculate the number of components the final instruction, which for
       * per-component things is the number of output components of the
       * instruction and non-per-component things is the number of enabled
       * channels in the write mask.
       */
      unsigned num_components;
      if (nir_op_infos[instr->op].output_size == 0) {
         /* Popcount of the 4-bit write mask via parallel bit summing. */
         unsigned temp = (write_mask & 0x5) + ((write_mask >> 1) & 0x5);
         num_components = (temp & 0x3) + ((temp >> 2) & 0x3);
      } else {
         num_components = nir_op_infos[instr->op].output_size;
      }

      char *name = NULL;
      if (instr->dest.dest.reg.reg->name)
         name = ralloc_asprintf(state->mem_ctx, "%s_%u",
                                reg->name, state->states[index].num_defs);

      /* Compact the instruction to write exactly num_components channels
       * into a new SSA destination; unlink from the register's def list
       * first so the list stays consistent.
       */
      instr->dest.write_mask = (1 << num_components) - 1;
      list_del(&instr->dest.dest.reg.def_link);
      nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components, name);

      if (nir_op_infos[instr->op].output_size == 0) {
         /*
          * When we change the output writemask, we need to change the
          * swizzles for per-component inputs too
          */
         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (nir_op_infos[instr->op].input_sizes[i] != 0)
               continue;

            unsigned new_swizzle[4] = {0, 0, 0, 0};

            /*
             * We keep two indices:
             * 1. The index of the original (non-SSA) component
             * 2. The index of the post-SSA, compacted, component
             *
             * We need to map the swizzle component at index 1 to the swizzle
             * component at index 2.
             */

            unsigned ssa_index = 0;
            for (unsigned index = 0; index < 4; index++) {
               if (!((write_mask >> index) & 1))
                  continue;

               new_swizzle[ssa_index] = instr->src[i].swizzle[index];
               ssa_index++;
            }

            for (unsigned j = 0; j < 4; j++)
               instr->src[i].swizzle[j] = new_swizzle[j];
         }
      }

      /* Build a vecN that merges the newly-written channels with the
       * untouched channels of the previous reaching definition.
       */
      nir_op op;
      switch (reg->num_components) {
      case 2: op = nir_op_vec2; break;
      case 3: op = nir_op_vec3; break;
      case 4: op = nir_op_vec4; break;
      default: unreachable("not reached");
      }

      nir_alu_instr *vec = nir_alu_instr_create(state->mem_ctx, op);

      vec->dest.dest.reg.reg = reg;
      vec->dest.write_mask = (1 << reg->num_components) - 1;

      /* old_src: previous reaching def (untouched channels);
       * new_src: the compacted SSA result of this instruction.
       */
      nir_ssa_def *old_src = get_ssa_src(reg, state);
      nir_ssa_def *new_src = &instr->dest.dest.ssa;

      unsigned ssa_index = 0;
      for (unsigned i = 0; i < reg->num_components; i++) {
         vec->src[i].src.is_ssa = true;
         if ((write_mask >> i) & 1) {
            vec->src[i].src.ssa = new_src;
            /* Per-component results were compacted, so index into the
             * compacted channels; fixed-size outputs keep channel i.
             */
            if (nir_op_infos[instr->op].output_size == 0)
               vec->src[i].swizzle[0] = ssa_index;
            else
               vec->src[i].swizzle[0] = i;
            ssa_index++;
         } else {
            vec->src[i].src.ssa = old_src;
            vec->src[i].swizzle[0] = i;
         }
      }

      nir_instr_insert_after(&instr->instr, &vec->instr);

      /* The vecN now carries the register definition; rewrite it so it
       * becomes the new top-of-stack SSA def for reg.
       */
      state->parent_instr = &vec->instr;
      rewrite_def_forwards(&vec->dest.dest, state);
   } else {
      /* Full write: rewrite the destination directly. */
      rewrite_def_forwards(&instr->dest.dest, state);
   }
}
|
|
|
|
|
|
|
|
|
|
/* Forward-rewrites a phi instruction. Only the destination is handled
 * here; phi sources are rewritten later from each predecessor block via
 * rewrite_phi_sources, since they must see the predecessor's stack state.
 */
static void
rewrite_phi_instr(nir_phi_instr *instr, rewrite_state *state)
{
   state->parent_instr = &instr->instr;
   rewrite_def_forwards(&instr->dest, state);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
rewrite_instr_forward(nir_instr *instr, rewrite_state *state)
|
|
|
|
|
{
|
|
|
|
|
if (instr->type == nir_instr_type_alu) {
|
|
|
|
|
rewrite_alu_instr_forward(nir_instr_as_alu(instr), state);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (instr->type == nir_instr_type_phi) {
|
|
|
|
|
rewrite_phi_instr(nir_instr_as_phi(instr), state);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
state->parent_instr = instr;
|
|
|
|
|
|
|
|
|
|
nir_foreach_src(instr, rewrite_use, state);
|
|
|
|
|
nir_foreach_dest(instr, rewrite_def_forwards, state);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Rewrites the phi sources in `block` that come from predecessor `pred`,
 * using pred's current renaming stacks. Called while visiting pred, for
 * each of its successors.
 */
static void
rewrite_phi_sources(nir_block *block, nir_block *pred, rewrite_state *state)
{
   nir_foreach_instr(block, instr) {
      /* Phis are grouped at the top of a block, so stop at the first
       * non-phi instruction.
       */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi_instr = nir_instr_as_phi(instr);

      state->parent_instr = instr;

      /* Each phi has at most one source per predecessor. */
      nir_foreach_phi_src(phi_instr, src) {
         if (src->pred == pred) {
            rewrite_use(&src->src, state);
            break;
         }
      }
   }
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
rewrite_def_backwards(nir_dest *dest, void *_state)
|
|
|
|
|
{
|
|
|
|
|
rewrite_state *state = (rewrite_state *) _state;
|
|
|
|
|
|
|
|
|
|
if (!dest->is_ssa)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(state->ssa_map, &dest->ssa);
|
|
|
|
|
|
|
|
|
|
if (!entry)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
nir_register *reg = (nir_register *) entry->data;
|
|
|
|
|
unsigned index = reg->index;
|
|
|
|
|
|
|
|
|
|
state->states[index].index--;
|
|
|
|
|
assert(state->states[index].index >= -1);
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Undoes the stack pushes made when this instruction was visited forwards:
 * pops every SSA def this instruction created off its register's rename
 * stack (see rewrite_def_backwards).
 */
static void
rewrite_instr_backwards(nir_instr *instr, rewrite_state *state)
{
   nir_foreach_dest(instr, rewrite_def_backwards, state);
}
|
|
|
|
|
|
|
|
|
|
/* Recursively renames a block and its dominance-tree children.
 *
 * This is the classic Cytron et al. renaming walk.  The phase order below is
 * load-bearing:
 *   1. forward-rewrite every instruction (pushes new SSA defs),
 *   2. rewrite the condition of an immediately-following if,
 *   3. fill in successor phi sources with the values live at the end of
 *      this block,
 *   4. recurse into dominated blocks,
 *   5. walk backwards, popping this block's defs off the rename stacks.
 */
static void
rewrite_block(nir_block *block, rewrite_state *state)
{
   /* This will skip over any instructions after the current one, which is
    * what we want because those instructions (vector gather, conditional
    * select) will already be in SSA form.
    */
   nir_foreach_instr_safe(block, instr) {
      rewrite_instr_forward(instr, state);
   }

   /* If the CF node right after this block is an if, its condition reads
    * the values live at the end of this block; rewrite it now.
    * parent_instr is cleared and parent_if set so rewrite_use records the
    * use against the if statement rather than an instruction.
    */
   if (block != state->impl->end_block &&
       !nir_cf_node_is_last(&block->cf_node) &&
       nir_cf_node_next(&block->cf_node)->type == nir_cf_node_if) {
      nir_if *if_stmt = nir_cf_node_as_if(nir_cf_node_next(&block->cf_node));
      state->parent_instr = NULL;
      state->parent_if = if_stmt;
      rewrite_use(&if_stmt->condition, state);
   }

   /* Successor phis take their source for this edge from the values
    * currently on the rename stacks.
    */
   if (block->successors[0])
      rewrite_phi_sources(block->successors[0], block, state);
   if (block->successors[1])
      rewrite_phi_sources(block->successors[1], block, state);

   /* Recurse in dominance-tree order so children see this block's defs. */
   for (unsigned i = 0; i < block->num_dom_children; i++)
      rewrite_block(block->dom_children[i], state);

   /* Pop this block's definitions so dominance siblings are unaffected. */
   nir_foreach_instr_reverse(block, instr) {
      rewrite_instr_backwards(instr, state);
   }
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
remove_unused_regs(nir_function_impl *impl, rewrite_state *state)
|
|
|
|
|
{
|
|
|
|
|
foreach_list_typed_safe(nir_register, reg, node, &impl->registers) {
|
|
|
|
|
if (state->states[reg->index].stack != NULL)
|
|
|
|
|
exec_node_remove(®->node);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
init_rewrite_state(nir_function_impl *impl, rewrite_state *state)
|
|
|
|
|
{
|
|
|
|
|
state->impl = impl;
|
|
|
|
|
state->mem_ctx = ralloc_parent(impl);
|
|
|
|
|
state->ssa_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
|
|
|
|
|
_mesa_key_pointer_equal);
|
|
|
|
|
state->states = ralloc_array(NULL, reg_state, impl->reg_alloc);
|
|
|
|
|
|
|
|
|
|
foreach_list_typed(nir_register, reg, node, &impl->registers) {
|
|
|
|
|
assert(reg->index < impl->reg_alloc);
|
|
|
|
|
if (reg->num_array_elems > 0) {
|
|
|
|
|
state->states[reg->index].stack = NULL;
|
|
|
|
|
} else {
|
|
|
|
|
/*
|
|
|
|
|
* Calculate a conservative estimate of the stack size based on the
|
|
|
|
|
* number of definitions there are. Note that this function *must* be
|
|
|
|
|
* called after phi nodes are inserted so we can count phi node
|
|
|
|
|
* definitions too.
|
|
|
|
|
*/
|
nir/nir: Use a linked list instead of a hash set for use/def sets
This commit switches us from the current setup of using hash sets for
use/def sets to using linked lists. Doing so should save us quite a bit of
memory because we aren't carrying around 3 hash sets per register and 2 per
SSA value. It should also save us CPU time because adding/removing things
from use/def sets is 4 pointer manipulations instead of a hash lookup.
Running shader-db 50 times with USE_NIR=0, NIR, and NIR + use/def lists:
GLSL IR Only: 586.4 +/- 1.653833
NIR with hash sets: 675.4 +/- 2.502108
NIR + use/def lists: 641.2 +/- 1.557043
I also ran a memory usage experiment with Ken's patch to delete GLSL IR and
keep NIR. This patch cuts an aditional 42.9 MiB of ralloc'd memory over
and above what we gained by deleting the GLSL IR on the same dota trace.
On the code complexity side of things, some things are now much easier and
others are a bit harder. One of the operations we perform constantly in
optimization passes is to replace one source with another. Due to the fact
that an instruction can use the same SSA value multiple times, we had to
iterate through the sources of the instruction and determine if the use we
were replacing was the only one before removing it from the set of uses.
With this patch, uses are per-source not per-instruction so we can just
remove it safely. On the other hand, trying to iterate over all of the
instructions that use a given value is more difficult. Fortunately, the
two places we do that are the ffma peephole where it doesn't matter and GCM
where we already gracefully handle duplicates visits to an instruction.
Another aspect here is that using linked lists in this way can be tricky to
get right. With sets, things were quite forgiving and the worst that
happened if you didn't properly remove a use was that it would get caught
in the validator. With linked lists, it can lead to linked list corruption
which can be harder to track. However, we do just as much validation of
the linked lists as we did of the sets so the validator should still catch
these problems. While working on this series, the vast majority of the
bugs I had to fix were caught by assertions. I don't think the lists are
going to be that much worse than the sets.
Reviewed-by: Connor Abbott <cwabbott0@gmail.com>
2015-04-24 10:16:27 -07:00
|
|
|
unsigned stack_size = list_length(®->defs);
|
2014-07-22 14:05:06 -07:00
|
|
|
|
|
|
|
|
state->states[reg->index].stack = ralloc_array(state->states,
|
|
|
|
|
nir_ssa_def *,
|
|
|
|
|
stack_size);
|
2015-02-20 12:31:31 -08:00
|
|
|
#ifndef NDEBUG
|
2014-07-22 14:05:06 -07:00
|
|
|
state->states[reg->index].stack_size = stack_size;
|
|
|
|
|
#endif
|
|
|
|
|
state->states[reg->index].index = -1;
|
|
|
|
|
state->states[reg->index].num_defs = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
destroy_rewrite_state(rewrite_state *state)
|
|
|
|
|
{
|
|
|
|
|
_mesa_hash_table_destroy(state->ssa_map, NULL);
|
|
|
|
|
ralloc_free(state->states);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_convert_to_ssa_impl(nir_function_impl *impl)
|
|
|
|
|
{
|
2014-12-12 16:25:38 -08:00
|
|
|
nir_metadata_require(impl, nir_metadata_dominance);
|
2014-07-22 14:05:06 -07:00
|
|
|
|
|
|
|
|
insert_phi_nodes(impl);
|
|
|
|
|
|
|
|
|
|
rewrite_state state;
|
|
|
|
|
init_rewrite_state(impl, &state);
|
|
|
|
|
|
2015-08-06 18:18:40 -07:00
|
|
|
rewrite_block(nir_start_block(impl), &state);
|
2014-07-22 14:05:06 -07:00
|
|
|
|
|
|
|
|
remove_unused_regs(impl, &state);
|
|
|
|
|
|
2014-12-12 16:25:38 -08:00
|
|
|
nir_metadata_preserve(impl, nir_metadata_block_index |
|
|
|
|
|
nir_metadata_dominance);
|
|
|
|
|
|
2014-07-22 14:05:06 -07:00
|
|
|
destroy_rewrite_state(&state);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_convert_to_ssa(nir_shader *shader)
|
|
|
|
|
{
|
2015-12-26 10:00:47 -08:00
|
|
|
nir_foreach_function(shader, function) {
|
|
|
|
|
if (function->impl)
|
|
|
|
|
nir_convert_to_ssa_impl(function->impl);
|
2014-07-22 14:05:06 -07:00
|
|
|
}
|
|
|
|
|
}
|