i965: Change brw_shader to gl_shader in brw_link_shader().

Nothing actually wanted brw_shader fields - we just had to type
shader->base all over the place for no reason.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason.ekstrand@intel.com>
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
This commit is contained in:
Kenneth Graunke 2015-04-07 16:29:32 -07:00
parent 500da98e0b
commit 10d85ffc5a

View file

@@ -129,15 +129,14 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
const struct gl_shader_compiler_options *options =
&ctx->Const.ShaderCompilerOptions[stage];
struct brw_shader *shader =
(struct brw_shader *)shProg->_LinkedShaders[stage];
struct gl_shader *shader = shProg->_LinkedShaders[stage];
if (!shader)
continue;
struct gl_program *prog =
ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
shader->base.Name);
shader->Name);
if (!prog)
return false;
prog->Parameters = _mesa_new_parameter_list();
@@ -147,19 +146,19 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
/* Temporary memory context for any new IR. */
void *mem_ctx = ralloc_context(NULL);
ralloc_adopt(mem_ctx, shader->base.ir);
ralloc_adopt(mem_ctx, shader->ir);
bool progress;
/* lower_packing_builtins() inserts arithmetic instructions, so it
* must precede lower_instructions().
*/
brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->base.ir);
do_mat_op_to_vec(shader->base.ir);
brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->ir);
do_mat_op_to_vec(shader->ir);
const int bitfield_insert = brw->gen >= 7
? BITFIELD_INSERT_TO_BFM_BFI
: 0;
lower_instructions(shader->base.ir,
lower_instructions(shader->ir,
MOD_TO_FLOOR |
DIV_TO_MUL_RCP |
SUB_TO_ADD_NEG |
@@ -172,21 +171,21 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
* if-statements need to be flattened.
*/
if (brw->gen < 6)
lower_if_to_cond_assign(shader->base.ir, 16);
lower_if_to_cond_assign(shader->ir, 16);
do_lower_texture_projection(shader->base.ir);
brw_lower_texture_gradients(brw, shader->base.ir);
do_vec_index_to_cond_assign(shader->base.ir);
lower_vector_insert(shader->base.ir, true);
do_lower_texture_projection(shader->ir);
brw_lower_texture_gradients(brw, shader->ir);
do_vec_index_to_cond_assign(shader->ir);
lower_vector_insert(shader->ir, true);
if (options->NirOptions == NULL)
brw_do_cubemap_normalize(shader->base.ir);
lower_offset_arrays(shader->base.ir);
brw_do_lower_unnormalized_offset(shader->base.ir);
lower_noise(shader->base.ir);
lower_quadop_vector(shader->base.ir, false);
brw_do_cubemap_normalize(shader->ir);
lower_offset_arrays(shader->ir);
brw_do_lower_unnormalized_offset(shader->ir);
lower_noise(shader->ir);
lower_quadop_vector(shader->ir, false);
bool lowered_variable_indexing =
lower_variable_index_to_cond_assign(shader->base.ir,
lower_variable_index_to_cond_assign(shader->ir,
options->EmitNoIndirectInput,
options->EmitNoIndirectOutput,
options->EmitNoIndirectTemp,
@@ -197,23 +196,23 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
"back to very inefficient code generation\n");
}
lower_ubo_reference(&shader->base, shader->base.ir);
lower_ubo_reference(shader, shader->ir);
do {
progress = false;
if (is_scalar_shader_stage(brw, stage)) {
brw_do_channel_expressions(shader->base.ir);
brw_do_vector_splitting(shader->base.ir);
brw_do_channel_expressions(shader->ir);
brw_do_vector_splitting(shader->ir);
}
progress = do_lower_jumps(shader->base.ir, true, true,
progress = do_lower_jumps(shader->ir, true, true,
true, /* main return */
false, /* continue */
false /* loops */
) || progress;
progress = do_common_optimization(shader->base.ir, true, true,
progress = do_common_optimization(shader->ir, true, true,
options, ctx->Const.NativeIntegers)
|| progress;
} while (progress);
@@ -225,7 +224,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
* too late. At that point, the values for the built-in uniforms won't
* get sent to the shader.
*/
foreach_in_list(ir_instruction, node, shader->base.ir) {
foreach_in_list(ir_instruction, node, shader->ir) {
ir_variable *var = node->as_variable();
if ((var == NULL) || (var->data.mode != ir_var_uniform)
@@ -241,15 +240,15 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
}
}
validate_ir_tree(shader->base.ir);
validate_ir_tree(shader->ir);
do_set_program_inouts(shader->base.ir, prog, shader->base.Stage);
do_set_program_inouts(shader->ir, prog, shader->Stage);
prog->SamplersUsed = shader->base.active_samplers;
prog->ShadowSamplers = shader->base.shadow_samplers;
prog->SamplersUsed = shader->active_samplers;
prog->ShadowSamplers = shader->shadow_samplers;
_mesa_update_shader_textures_used(shProg, prog);
_mesa_reference_program(ctx, &shader->base.Program, prog);
_mesa_reference_program(ctx, &shader->Program, prog);
brw_add_texrect_params(prog);
@@ -259,15 +258,15 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
* to the permanent memory context, and free the temporary one (discarding any
* junk we optimized away).
*/
reparent_ir(shader->base.ir, shader->base.ir);
reparent_ir(shader->ir, shader->ir);
ralloc_free(mem_ctx);
if (ctx->_Shader->Flags & GLSL_DUMP) {
fprintf(stderr, "\n");
fprintf(stderr, "GLSL IR for linked %s program %d:\n",
_mesa_shader_stage_to_string(shader->base.Stage),
_mesa_shader_stage_to_string(shader->Stage),
shProg->Name);
_mesa_print_ir(stderr, shader->base.ir, NULL);
_mesa_print_ir(stderr, shader->ir, NULL);
fprintf(stderr, "\n");
}
}