svga: add svga_compile_shader helper function

This patch refactors common shader compilation code into a helper function
which will call the corresponding shader translation function according to
the shader IR type.

It also adds a function pointer for getting a dummy shader for the
different shader stages.

Reviewed-by: Neha Bhende <bhenden@vmware.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16501>
This commit is contained in:
Charmaine Lee 2022-05-12 20:57:22 -07:00 committed by Marge Bot
parent c291e685bc
commit 058823906e
13 changed files with 150 additions and 302 deletions

View file

@ -61,12 +61,15 @@ svga_create_compute_state(struct pipe_context *pipe,
/* we need to keep a local copy of the tokens */
cs->base.tokens = tgsi_dup_tokens(templ->prog);
}
assert(templ->ir_type == PIPE_SHADER_IR_TGSI);
struct svga_shader *shader = &cs->base;
shader->id = svga->debug.shader_id++;
shader->type = templ->ir_type;
shader->stage = PIPE_SHADER_COMPUTE;
/* Collect shader basic info */
svga_tgsi_scan_shader(&cs->base);
cs->base.id = svga->debug.shader_id++;
cs->shared_mem_size = templ->req_local_mem;
SVGA_STATS_TIME_POP(svga_sws(svga));

View file

@ -63,6 +63,8 @@ svga_create_fs_state(struct pipe_context *pipe,
fs->generic_inputs = svga_get_generic_inputs_mask(&fs->base.tgsi_info);
fs->base.get_dummy_shader = svga_get_compiled_dummy_fragment_shader;
svga_remap_generics(fs->base.info.generic_inputs_mask,
fs->generic_remap_table);

View file

@ -61,6 +61,7 @@ svga_create_gs_state(struct pipe_context *pipe,
tmp.type = PIPE_SHADER_IR_TGSI;
tmp.tokens = gs->base.tokens;
gs->base.get_dummy_shader = svga_get_compiled_dummy_geometry_shader;
gs->draw_shader = draw_create_geometry_shader(svga->swtnl.draw, &tmp);
done:

View file

@ -55,6 +55,8 @@ svga_create_vs_state(struct pipe_context *pipe,
if (!vs)
goto done;
vs->base.get_dummy_shader = svga_get_compiled_dummy_vertex_shader;
{
/* Need to do construct a new template in case we substituted a
* debug shader.

View file

@ -947,3 +947,64 @@ svga_create_shader(struct pipe_context *pipe,
return shader;
}
/**
 * Helper function to compile a shader.
 * Depending on the shader IR type, it calls the corresponding
 * compile shader function.  If translation fails, or the compiled
 * variant is too large for the device, the stage's dummy shader
 * callback (if any) is used as a fallback.  On success the variant
 * is defined with the device and inserted at the head of the
 * shader's variant list.
 *
 * \return PIPE_OK on success, an error code otherwise.
 */
enum pipe_error
svga_compile_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant = NULL;
enum pipe_error ret = PIPE_ERROR;
if (shader->type == PIPE_SHADER_IR_TGSI) {
variant = svga_tgsi_compile_shader(svga, shader, key);
} else {
/* Only TGSI is handled here; NIR is not wired up in this path. */
debug_printf("Unexpected nir shader\n");
assert(0);
}
if (variant == NULL) {
if (shader->get_dummy_shader != NULL) {
/* Translation failed: fall back to the stage's dummy shader. */
debug_printf("Failed to compile shader, using dummy shader.\n");
variant = shader->get_dummy_shader(svga, shader, key);
}
}
else if (svga_shader_too_large(svga, variant)) {
/* too big, use dummy shader */
if (shader->get_dummy_shader != NULL) {
debug_printf("Shader too large (%u bytes), using dummy shader.\n",
(unsigned)(variant->nr_tokens
* sizeof(variant->tokens[0])));
/* Free the too-large variant */
svga_destroy_shader_variant(svga, variant);
/* Use simple pass-through shader instead */
variant = shader->get_dummy_shader(svga, shader, key);
}
}
if (variant == NULL)
return PIPE_ERROR;
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
/* The device rejected the variant; release it and report the error. */
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
/* insert variant at head of linked list */
variant->next = shader->variants;
shader->variants = variant;
return PIPE_OK;
}

View file

@ -371,6 +371,11 @@ struct svga_shader
/** Head of linked list of compiled variants */
struct svga_shader_variant *variants;
/* Get dummy shader variant */
struct svga_shader_variant *(*get_dummy_shader)(struct svga_context *,
struct svga_shader *,
const struct svga_compile_key *);
unsigned id; /**< for debugging only */
};
@ -488,6 +493,12 @@ svga_create_shader(struct pipe_context *pipe,
enum pipe_shader_type stage,
unsigned len);
enum pipe_error
svga_compile_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant);
enum pipe_error
svga_define_shader(struct svga_context *svga,
struct svga_shader_variant *variant);
@ -604,5 +615,20 @@ svga_is_using_flat_shading(const struct svga_context *svga)
svga_fs_variant(svga->state.hw_draw.fs)->uses_flat_interp : FALSE;
}
struct svga_shader_variant *
svga_get_compiled_dummy_vertex_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key);
struct svga_shader_variant *
svga_get_compiled_dummy_fragment_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key);
struct svga_shader_variant *
svga_get_compiled_dummy_geometry_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key);
#endif /* SVGA_SHADER_H */

View file

@ -35,35 +35,6 @@
#include "svga_tgsi.h"
/**
* Translate TGSI shader into an svga shader variant.
*/
static enum pipe_error
compile_cs(struct svga_context *svga,
struct svga_compute_shader *cs,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
variant = svga_tgsi_vgpu10_translate(svga, &cs->base, key,
PIPE_SHADER_COMPUTE);
if (!variant)
return PIPE_ERROR;
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
return PIPE_OK;
}
/**
* Create compute shader compile key.
*/
@ -128,14 +99,9 @@ emit_hw_cs(struct svga_context *svga, uint64_t dirty)
variant = svga_search_shader_key(&cs->base, &key);
if (!variant) {
ret = compile_cs(svga, cs, &key, &variant);
ret = svga_compile_shader(svga, &cs->base, &key, &variant);
if (ret != PIPE_OK)
goto done;
/* insert the new variant at head of linked list */
assert(variant);
variant->next = cs->base.variants;
cs->base.variants = variant;
}
if (variant != svga->state.hw_draw.cs) {

View file

@ -80,31 +80,16 @@ get_dummy_fragment_shader(void)
}
/* Translate a fragment shader with the translator matching the
 * device's shader model (VGPU10 vs. VGPU9).
 */
static struct svga_shader_variant *
translate_fragment_program(struct svga_context *svga,
                           const struct svga_fragment_shader *fs,
                           const struct svga_compile_key *key)
{
   return svga_have_vgpu10(svga)
      ? svga_tgsi_vgpu10_translate(svga, &fs->base, key,
                                   PIPE_SHADER_FRAGMENT)
      : svga_tgsi_vgpu9_translate(svga, &fs->base, key,
                                  PIPE_SHADER_FRAGMENT);
}
/**
* Replace the given shader's instruction with a simple constant-color
* shader. We use this when normal shader translation fails.
*/
static struct svga_shader_variant *
get_compiled_dummy_shader(struct svga_context *svga,
struct svga_fragment_shader *fs,
const struct svga_compile_key *key)
struct svga_shader_variant *
svga_get_compiled_dummy_fragment_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key)
{
struct svga_fragment_shader *fs = (struct svga_fragment_shader *)shader;
const struct tgsi_token *dummy = get_dummy_fragment_shader();
struct svga_shader_variant *variant;
@ -119,61 +104,11 @@ get_compiled_dummy_shader(struct svga_context *svga,
svga_remap_generics(fs->base.info.generic_inputs_mask,
fs->generic_remap_table);
variant = translate_fragment_program(svga, fs, key);
variant = svga_tgsi_compile_shader(svga, shader, key);
return variant;
}
/**
 * Translate TGSI shader into an svga shader variant.
 * If translation fails, or the resulting variant exceeds the device's
 * shader size limit, a simple constant-color dummy shader is used
 * instead.  On success the variant is defined with the device,
 * inserted at the head of the shader's variant list, and returned
 * through \param out_variant.
 */
static enum pipe_error
compile_fs(struct svga_context *svga,
struct svga_fragment_shader *fs,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
variant = translate_fragment_program(svga, fs, key);
if (variant == NULL) {
/* Translation failed: fall back to the dummy shader. */
debug_printf("Failed to compile fragment shader,"
" using dummy shader instead.\n");
variant = get_compiled_dummy_shader(svga, fs, key);
}
else if (svga_shader_too_large(svga, variant)) {
/* too big, use dummy shader */
debug_printf("Shader too large (%u bytes),"
" using dummy shader instead.\n",
(unsigned) (variant->nr_tokens
* sizeof(variant->tokens[0])));
/* Free the too-large variant */
svga_destroy_shader_variant(svga, variant);
/* Use simple pass-through shader instead */
variant = get_compiled_dummy_shader(svga, fs, key);
}
if (!variant) {
/* Even the dummy shader could not be compiled. */
return PIPE_ERROR;
}
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
/* insert variant at head of linked list */
variant->next = fs->base.variants;
fs->base.variants = variant;
return PIPE_OK;
}
/* SVGA_NEW_TEXTURE_BINDING
* SVGA_NEW_RAST
* SVGA_NEW_NEED_SWTNL
@ -463,7 +398,7 @@ emit_hw_fs(struct svga_context *svga, uint64_t dirty)
variant = svga_search_shader_key(&fs->base, &key);
if (!variant) {
ret = compile_fs(svga, fs, &key, &variant);
ret = svga_compile_shader(svga, &fs->base, &key, &variant);
if (ret != PIPE_OK)
goto done;
}

View file

@ -48,54 +48,24 @@ get_dummy_geometry_shader(void)
}
/* Translate a geometry shader into an svga shader variant.
 * Geometry shaders require VGPU10, hence the single translator.
 */
static struct svga_shader_variant *
translate_geometry_program(struct svga_context *svga,
                           const struct svga_geometry_shader *gs,
                           const struct svga_compile_key *key)
{
   assert(svga_have_vgpu10(svga));

   return svga_tgsi_vgpu10_translate(svga, &gs->base, key,
                                     PIPE_SHADER_GEOMETRY);
}
/**
* Translate TGSI shader into an svga shader variant.
*/
static enum pipe_error
compile_gs(struct svga_context *svga,
struct svga_geometry_shader *gs,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
struct svga_shader_variant *
svga_get_compiled_dummy_geometry_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key)
{
const struct tgsi_token *dummy = get_dummy_geometry_shader();
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
struct svga_geometry_shader *gs = (struct svga_geometry_shader *)shader;
variant = translate_geometry_program(svga, gs, key);
if (!variant) {
/* some problem during translation, try the dummy shader */
const struct tgsi_token *dummy = get_dummy_geometry_shader();
if (!dummy) {
return PIPE_ERROR_OUT_OF_MEMORY;
}
debug_printf("Failed to compile geometry shader, using dummy shader instead.\n");
FREE((void *) gs->base.tokens);
gs->base.tokens = dummy;
variant = translate_geometry_program(svga, gs, key);
if (!variant) {
return PIPE_ERROR;
}
}
if (!dummy)
return NULL;
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
FREE((void *) gs->base.tokens);
gs->base.tokens = dummy;
svga_tgsi_scan_shader(&gs->base);
variant = svga_tgsi_compile_shader(svga, shader, key);
*out_variant = variant;
return PIPE_OK;
return variant;
}
@ -199,14 +169,9 @@ emit_hw_gs(struct svga_context *svga, uint64_t dirty)
variant = svga_search_shader_key(&gs->base, &key);
if (!variant) {
ret = compile_gs(svga, gs, &key, &variant);
ret = svga_compile_shader(svga, &gs->base, &key, &variant);
if (ret != PIPE_OK)
goto done;
/* insert the new variant at head of linked list */
assert(variant);
variant->next = gs->base.variants;
gs->base.variants = variant;
}
}

View file

@ -33,35 +33,6 @@
#include "svga_shader.h"
/**
* Translate TGSI shader into an svga shader variant.
*/
static enum pipe_error
compile_tcs(struct svga_context *svga,
struct svga_tcs_shader *tcs,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
variant = svga_tgsi_vgpu10_translate(svga, &tcs->base, key,
PIPE_SHADER_TESS_CTRL);
if (!variant)
return PIPE_ERROR;
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
return PIPE_OK;
}
static void
make_tcs_key(struct svga_context *svga, struct svga_compile_key *key)
{
@ -143,14 +114,9 @@ emit_hw_tcs(struct svga_context *svga, uint64_t dirty)
variant = svga_search_shader_key(&tcs->base, &key);
if (!variant) {
ret = compile_tcs(svga, tcs, &key, &variant);
ret = svga_compile_shader(svga, &tcs->base, &key, &variant);
if (ret != PIPE_OK)
goto done;
/* insert the new variant at head of linked list */
assert(variant);
variant->next = tcs->base.variants;
tcs->base.variants = variant;
}
if (variant != svga->state.hw_draw.tcs) {
@ -184,35 +150,6 @@ struct svga_tracked_state svga_hw_tcs =
};
/**
* Translate TGSI shader into an svga shader variant.
*/
static enum pipe_error
compile_tes(struct svga_context *svga,
struct svga_tes_shader *tes,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
variant = svga_tgsi_vgpu10_translate(svga, &tes->base, key,
PIPE_SHADER_TESS_EVAL);
if (!variant)
return PIPE_ERROR;
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
return PIPE_OK;
}
static void
make_tes_key(struct svga_context *svga, struct svga_compile_key *key)
{
@ -345,14 +282,9 @@ emit_hw_tes(struct svga_context *svga, uint64_t dirty)
variant = svga_search_shader_key(&tes->base, &key);
if (!variant) {
ret = compile_tes(svga, tes, &key, &variant);
ret = svga_compile_shader(svga, &tes->base, &key, &variant);
if (ret != PIPE_OK)
goto done;
/* insert the new variant at head of linked list */
assert(variant);
variant->next = tes->base.variants;
tes->base.variants = variant;
}
if (variant != svga->state.hw_draw.tes) {

View file

@ -70,31 +70,16 @@ get_dummy_vertex_shader(void)
}
/* Translate a vertex shader with the translator matching the
 * device's shader model (VGPU10 vs. VGPU9).
 */
static struct svga_shader_variant *
translate_vertex_program(struct svga_context *svga,
                         const struct svga_vertex_shader *vs,
                         const struct svga_compile_key *key)
{
   return svga_have_vgpu10(svga)
      ? svga_tgsi_vgpu10_translate(svga, &vs->base, key,
                                   PIPE_SHADER_VERTEX)
      : svga_tgsi_vgpu9_translate(svga, &vs->base, key,
                                  PIPE_SHADER_VERTEX);
}
/**
* Replace the given shader's instruction with a simple / dummy shader.
* We use this when normal shader translation fails.
*/
static struct svga_shader_variant *
get_compiled_dummy_vertex_shader(struct svga_context *svga,
struct svga_vertex_shader *vs,
const struct svga_compile_key *key)
struct svga_shader_variant *
svga_get_compiled_dummy_vertex_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key)
{
struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
const struct tgsi_token *dummy = get_dummy_vertex_shader();
struct svga_shader_variant *variant;
@ -107,57 +92,11 @@ get_compiled_dummy_vertex_shader(struct svga_context *svga,
svga_tgsi_scan_shader(&vs->base);
variant = translate_vertex_program(svga, vs, key);
variant = svga_tgsi_compile_shader(svga, shader, key);
return variant;
}
/**
 * Translate TGSI shader into an svga shader variant.
 * If translation fails, or the resulting variant exceeds the device's
 * shader size limit, a simple dummy shader is used instead.  On
 * success the variant is defined with the device and returned
 * through \param out_variant (the caller inserts it into the
 * shader's variant list).
 */
static enum pipe_error
compile_vs(struct svga_context *svga,
struct svga_vertex_shader *vs,
const struct svga_compile_key *key,
struct svga_shader_variant **out_variant)
{
struct svga_shader_variant *variant;
enum pipe_error ret = PIPE_ERROR;
variant = translate_vertex_program(svga, vs, key);
if (variant == NULL) {
/* Translation failed: fall back to the dummy shader. */
debug_printf("Failed to compile vertex shader,"
" using dummy shader instead.\n");
variant = get_compiled_dummy_vertex_shader(svga, vs, key);
}
else if (svga_shader_too_large(svga, variant)) {
/* too big, use dummy shader */
debug_printf("Shader too large (%u bytes),"
" using dummy shader instead.\n",
(unsigned) (variant->nr_tokens
* sizeof(variant->tokens[0])));
/* Free the too-large variant */
svga_destroy_shader_variant(svga, variant);
/* Use simple pass-through shader instead */
variant = get_compiled_dummy_vertex_shader(svga, vs, key);
}
if (!variant) {
/* Even the dummy shader could not be compiled. */
return PIPE_ERROR;
}
ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
svga_destroy_shader_variant(svga, variant);
return ret;
}
*out_variant = variant;
return PIPE_OK;
}
/* SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS
*/
static void
@ -329,7 +268,7 @@ compile_passthrough_vs(struct svga_context *svga,
memset(&key, 0, sizeof(key));
key.vs.undo_viewport = 1;
ret = compile_vs(svga, &new_vs, &key, &variant);
ret = svga_compile_shader(svga, &new_vs.base, &key, &variant);
if (ret != PIPE_OK)
return ret;
@ -393,15 +332,10 @@ emit_hw_vs(struct svga_context *svga, uint64_t dirty)
ret = compile_passthrough_vs(svga, vs, fs, &variant);
}
else {
ret = compile_vs(svga, vs, &key, &variant);
ret = svga_compile_shader(svga, &vs->base, &key, &variant);
}
if (ret != PIPE_OK)
goto done;
/* insert the new variant at head of linked list */
assert(variant);
variant->next = vs->base.variants;
vs->base.variants = variant;
}
}

View file

@ -515,3 +515,20 @@ svga_tgsi_scan_shader(struct svga_shader *shader)
break;
}
}
/**
 * Compile a TGSI shader, dispatching to the VGPU10 or VGPU9
 * translator depending on the device's capabilities.  The shader's
 * own stage is passed through to the translator.
 */
struct svga_shader_variant *
svga_tgsi_compile_shader(struct svga_context *svga,
                         struct svga_shader *shader,
                         const struct svga_compile_key *key)
{
   return svga_have_vgpu10(svga)
      ? svga_tgsi_vgpu10_translate(svga, shader, key, shader->stage)
      : svga_tgsi_vgpu9_translate(svga, shader, key, shader->stage);
}

View file

@ -79,4 +79,8 @@ boolean svga_shader_verify(const uint32_t *tokens, unsigned nr_tokens);
void
svga_tgsi_scan_shader(struct svga_shader *shader);
struct svga_shader_variant *
svga_tgsi_compile_shader(struct svga_context *svga,
struct svga_shader *shader,
const struct svga_compile_key *key);
#endif