mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-20 07:20:10 +01:00
pan: roll lower_texture() into postprocess()
Every caller of pan_shader_lower_texture() immediately called pan_shader_postprocess(), and every caller of pan_shader_postprocess() lowered textures except blend shaders — and those don't texture anyway. Reviewed-by: Olivia Lee <olivia.lee@collabora.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38265>
This commit is contained in:
parent
dcb9ca1aa6
commit
0fae56e100
8 changed files with 4 additions and 15 deletions
|
|
@ -562,7 +562,6 @@ pan_preload_get_shader(struct pan_fb_preload_cache *cache,
|
||||||
|
|
||||||
pan_shader_preprocess(b.shader, inputs.gpu_id);
|
pan_shader_preprocess(b.shader, inputs.gpu_id);
|
||||||
pan_shader_lower_texture_early(b.shader, inputs.gpu_id);
|
pan_shader_lower_texture_early(b.shader, inputs.gpu_id);
|
||||||
pan_shader_lower_texture(b.shader, inputs.gpu_id);
|
|
||||||
pan_shader_postprocess(b.shader, inputs.gpu_id);
|
pan_shader_postprocess(b.shader, inputs.gpu_id);
|
||||||
|
|
||||||
if (PAN_ARCH == 4) {
|
if (PAN_ARCH == 4) {
|
||||||
|
|
|
||||||
|
|
@ -139,7 +139,6 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
|
||||||
if (mesa_shader_stage_is_compute(s->info.stage)) {
|
if (mesa_shader_stage_is_compute(s->info.stage)) {
|
||||||
pan_shader_preprocess(s, panfrost_device_gpu_id(dev));
|
pan_shader_preprocess(s, panfrost_device_gpu_id(dev));
|
||||||
pan_shader_lower_texture_early(s, panfrost_device_gpu_id(dev));
|
pan_shader_lower_texture_early(s, panfrost_device_gpu_id(dev));
|
||||||
pan_shader_lower_texture(s, panfrost_device_gpu_id(dev));
|
|
||||||
pan_shader_postprocess(s, panfrost_device_gpu_id(dev));
|
pan_shader_postprocess(s, panfrost_device_gpu_id(dev));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -526,7 +525,6 @@ panfrost_create_shader_state(struct pipe_context *pctx,
|
||||||
*/
|
*/
|
||||||
NIR_PASS(_, nir, nir_opt_constant_folding);
|
NIR_PASS(_, nir, nir_opt_constant_folding);
|
||||||
|
|
||||||
pan_shader_lower_texture(nir, panfrost_device_gpu_id(dev));
|
|
||||||
pan_shader_postprocess(nir, panfrost_device_gpu_id(dev));
|
pan_shader_postprocess(nir, panfrost_device_gpu_id(dev));
|
||||||
|
|
||||||
if (nir->info.stage == MESA_SHADER_FRAGMENT)
|
if (nir->info.stage == MESA_SHADER_FRAGMENT)
|
||||||
|
|
|
||||||
|
|
@ -425,7 +425,6 @@ main(int argc, const char **argv)
|
||||||
|
|
||||||
pan_shader_preprocess(s, inputs.gpu_id);
|
pan_shader_preprocess(s, inputs.gpu_id);
|
||||||
pan_shader_lower_texture_early(s, inputs.gpu_id);
|
pan_shader_lower_texture_early(s, inputs.gpu_id);
|
||||||
pan_shader_lower_texture(s, inputs.gpu_id);
|
|
||||||
pan_shader_postprocess(s, inputs.gpu_id);
|
pan_shader_postprocess(s, inputs.gpu_id);
|
||||||
|
|
||||||
NIR_PASS(_, s, nir_opt_deref);
|
NIR_PASS(_, s, nir_opt_deref);
|
||||||
|
|
|
||||||
|
|
@ -5997,6 +5997,8 @@ bifrost_postprocess_nir(nir_shader *nir, unsigned gpu_id)
|
||||||
{
|
{
|
||||||
MESA_TRACE_FUNC();
|
MESA_TRACE_FUNC();
|
||||||
|
|
||||||
|
bifrost_lower_texture_nir(nir, gpu_id);
|
||||||
|
|
||||||
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
|
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
|
||||||
NIR_PASS(_, nir, nir_lower_mediump_io,
|
NIR_PASS(_, nir, nir_lower_mediump_io,
|
||||||
nir_var_shader_in | nir_var_shader_out,
|
nir_var_shader_in | nir_var_shader_out,
|
||||||
|
|
|
||||||
|
|
@ -83,15 +83,6 @@ pan_shader_lower_texture_early(nir_shader *nir, unsigned gpu_id)
|
||||||
NIR_PASS(_, nir, nir_lower_tex, &lower_tex_options);
|
NIR_PASS(_, nir, nir_lower_tex, &lower_tex_options);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
|
||||||
pan_shader_lower_texture(nir_shader *nir, unsigned gpu_id)
|
|
||||||
{
|
|
||||||
if (pan_arch(gpu_id) >= 6)
|
|
||||||
bifrost_lower_texture_nir(nir, gpu_id);
|
|
||||||
else
|
|
||||||
midgard_lower_texture_nir(nir, gpu_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
pan_shader_disassemble(FILE *fp, const void *code, size_t size, unsigned gpu_id,
|
pan_shader_disassemble(FILE *fp, const void *code, size_t size, unsigned gpu_id,
|
||||||
bool verbose)
|
bool verbose)
|
||||||
|
|
|
||||||
|
|
@ -395,6 +395,8 @@ midgard_preprocess_nir(nir_shader *nir, UNUSED unsigned gpu_id)
|
||||||
void
|
void
|
||||||
midgard_postprocess_nir(nir_shader *nir, UNUSED unsigned gpu_id)
|
midgard_postprocess_nir(nir_shader *nir, UNUSED unsigned gpu_id)
|
||||||
{
|
{
|
||||||
|
midgard_lower_texture_nir(nir, gpu_id);
|
||||||
|
|
||||||
if (nir->info.stage == MESA_SHADER_VERTEX) {
|
if (nir->info.stage == MESA_SHADER_VERTEX) {
|
||||||
/* nir_lower[_explicit]_io is lazy and emits mul+add chains even
|
/* nir_lower[_explicit]_io is lazy and emits mul+add chains even
|
||||||
* for offsets it could figure out are constant. Do some
|
* for offsets it could figure out are constant. Do some
|
||||||
|
|
|
||||||
|
|
@ -155,7 +155,6 @@ get_preload_shader(struct panvk_device *dev,
|
||||||
|
|
||||||
pan_shader_preprocess(nir, inputs.gpu_id);
|
pan_shader_preprocess(nir, inputs.gpu_id);
|
||||||
pan_shader_lower_texture_early(nir, inputs.gpu_id);
|
pan_shader_lower_texture_early(nir, inputs.gpu_id);
|
||||||
pan_shader_lower_texture(nir, inputs.gpu_id);
|
|
||||||
pan_shader_postprocess(nir, inputs.gpu_id);
|
pan_shader_postprocess(nir, inputs.gpu_id);
|
||||||
|
|
||||||
VkResult result = panvk_per_arch(create_internal_shader)(
|
VkResult result = panvk_per_arch(create_internal_shader)(
|
||||||
|
|
|
||||||
|
|
@ -933,7 +933,6 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
|
||||||
*/
|
*/
|
||||||
NIR_PASS(_, nir, nir_opt_constant_folding);
|
NIR_PASS(_, nir, nir_opt_constant_folding);
|
||||||
|
|
||||||
pan_shader_lower_texture(nir, compile_input->gpu_id);
|
|
||||||
pan_shader_postprocess(nir, compile_input->gpu_id);
|
pan_shader_postprocess(nir, compile_input->gpu_id);
|
||||||
|
|
||||||
if (stage == MESA_SHADER_VERTEX)
|
if (stage == MESA_SHADER_VERTEX)
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue