mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 11:48:06 +02:00
radeonsi: Compute correct LDS size for fragment shaders.
Not sure where the 36 came from, but we clearly need at least 48 bytes per attribute per primitive. Signed-off-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl> Reviewed-by: Marek Olšák <marek.olsak@amd.com>
This commit is contained in:
parent
a1f698881e
commit
6291f19f71
1 changed file with 6 additions and 3 deletions
|
|
@ -5640,15 +5640,18 @@ static void si_shader_dump_stats(struct si_screen *sscreen,
|
|||
|
||||
/* Compute LDS usage for PS. */
|
||||
if (processor == PIPE_SHADER_FRAGMENT) {
|
||||
/* The minimum usage per wave is (num_inputs * 36). The maximum
|
||||
* usage is (num_inputs * 36 * 16).
|
||||
/* The minimum usage per wave is (num_inputs * 48). The maximum
|
||||
* usage is (num_inputs * 48 * 16).
|
||||
* We can get anything in between and it varies between waves.
|
||||
*
|
||||
* The 48 bytes per input for a single primitive is equal to
|
||||
* 4 bytes/component * 4 components/input * 3 points.
|
||||
*
|
||||
* Other stages don't know the size at compile time or don't
|
||||
* allocate LDS per wave, but instead they do it per thread group.
|
||||
*/
|
||||
lds_per_wave = conf->lds_size * lds_increment +
|
||||
align(num_inputs * 36, lds_increment);
|
||||
align(num_inputs * 48, lds_increment);
|
||||
}
|
||||
|
||||
/* Compute the per-SIMD wave counts. */
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue