zink: Avoid variable shadowing everywhere

Allows building the zink driver with -Wshadow and makes sure that we are actually
dealing with the right variable when requested.

Signed-off-by: Corentin Noël <corentin.noel@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27259>
This commit is contained in:
Corentin Noël 2024-01-26 12:06:14 +01:00 committed by Marge Bot
parent 86aef4a3c4
commit fe8c2dd43a
7 changed files with 65 additions and 64 deletions

View file

@@ -3055,15 +3055,15 @@ emit_image_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
type_to_dim(glsl_get_sampler_dim(type), &is_ms);
SpvId sample = is_ms ? get_src(ctx, &intr->src[2], &atype) : emit_uint_const(ctx, 32, 0);
SpvId coord = get_image_coords(ctx, type, &intr->src[1]);
enum glsl_base_type glsl_type = glsl_get_sampler_result_type(type);
SpvId base_type = get_glsl_basetype(ctx, glsl_type);
enum glsl_base_type glsl_result_type = glsl_get_sampler_result_type(type);
SpvId base_type = get_glsl_basetype(ctx, glsl_result_type);
SpvId texel = spirv_builder_emit_image_texel_pointer(&ctx->builder, base_type, img_var, coord, sample);
SpvId param2 = 0;
/* The type of Value must be the same as Result Type.
* The type of the value pointed to by Pointer must be the same as Result Type.
*/
nir_alu_type ntype = nir_get_nir_type_for_glsl_base_type(glsl_type);
nir_alu_type ntype = nir_get_nir_type_for_glsl_base_type(glsl_result_type);
if (ptype != ntype) {
SpvId cast_type = get_def_type(ctx, &intr->def, ntype);
param = emit_bitcast(ctx, cast_type, param);
@@ -3583,9 +3583,9 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
unsigned num_components = nir_src_num_components(tex->src[i].src);
SpvId components[NIR_MAX_VEC_COMPONENTS];
for (int i = 0; i < num_components; ++i) {
int64_t tmp = nir_const_value_as_int(cv[i], bit_size);
components[i] = emit_int_const(ctx, bit_size, tmp);
for (int j = 0; j < num_components; ++j) {
int64_t tmp = nir_const_value_as_int(cv[j], bit_size);
components[j] = emit_int_const(ctx, bit_size, tmp);
}
if (num_components > 1) {

View file

@@ -896,11 +896,11 @@ static int
batch_find_resource(struct zink_batch_state *bs, struct zink_resource_object *obj, struct zink_batch_obj_list *list)
{
unsigned hash = obj->bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
int i = bs->buffer_indices_hashlist[hash];
int buffer_index = bs->buffer_indices_hashlist[hash];
/* not found or found */
if (i < 0 || (i < list->num_buffers && list->objs[i] == obj))
return i;
if (buffer_index < 0 || (buffer_index < list->num_buffers && list->objs[buffer_index] == obj))
return buffer_index;
/* Hash collision, look for the BO in the list of list->objs linearly. */
for (int i = list->num_buffers - 1; i >= 0; i--) {

View file

@@ -579,9 +579,9 @@ zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, en
//struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ?
//screen->bo_slabs_encrypted : screen->bo_slabs;
struct pb_slabs *slabs = screen->pb.bo_slabs;
struct pb_slabs *bo_slabs = screen->pb.bo_slabs;
struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1];
struct pb_slabs *last_slab = &bo_slabs[NUM_SLAB_ALLOCATORS - 1];
unsigned max_slab_entry_size = 1 << (last_slab->min_order + last_slab->num_orders - 1);
/* Sub-allocate small buffers from slabs. */

View file

@@ -707,9 +707,9 @@ lower_line_stipple_fs(nir_shader *shader)
nir_iand_imm(&b, nir_ishr(&b, pattern, stipple_pos), 1);
nir_push_if(&b, nir_ieq_imm(&b, bit, 0));
{
nir_def *value = nir_load_var(&b, sample_mask);
value = nir_ixor(&b, value, index_mask);
nir_store_var(&b, sample_mask, value, 1);
nir_def *sample_mask_value = nir_load_var(&b, sample_mask);
sample_mask_value = nir_ixor(&b, sample_mask_value, index_mask);
nir_store_var(&b, sample_mask, sample_mask_value, 1);
}
nir_pop_if(&b, NULL);
}
@@ -2044,7 +2044,7 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
return true;
}
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_shared:
case nir_intrinsic_load_shared: {
b->cursor = nir_before_instr(instr);
bool force_2x32 = intr->def.bit_size == 64 && !has_int64;
nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->def.bit_size) / 8);
@@ -2064,6 +2064,7 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
return true;
}
break;
}
case nir_intrinsic_store_ssbo: {
b->cursor = nir_before_instr(instr);
bool force_2x32 = nir_src_bit_size(intr->src[0]) == 64 && !has_int64;
@@ -2197,8 +2198,8 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr));
new_instr->src[0] = nir_src_for_ssa(&deref_arr->def);
/* deref ops have no offset src, so copy the srcs after it */
for (unsigned i = 2; i < nir_intrinsic_infos[intr->intrinsic].num_srcs; i++)
new_instr->src[i - 1] = nir_src_for_ssa(intr->src[i].ssa);
for (unsigned j = 2; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; j++)
new_instr->src[j - 1] = nir_src_for_ssa(intr->src[j].ssa);
nir_builder_instr_insert(b, &new_instr->instr);
result[i] = &new_instr->def;
@@ -2777,26 +2778,26 @@ zink_compiler_assign_io(struct zink_screen *screen, nir_shader *producer, nir_sh
}
if (consumer->info.stage != MESA_SHADER_FRAGMENT) {
producer->info.has_transform_feedback_varyings = false;
nir_foreach_shader_out_variable(var, producer)
var->data.explicit_xfb_buffer = false;
nir_foreach_shader_out_variable(var_out, producer)
var_out->data.explicit_xfb_buffer = false;
}
if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
/* never assign from tcs -> tes, always invert */
nir_foreach_variable_with_modes(var, consumer, nir_var_shader_in)
assign_producer_var_io(consumer->info.stage, var, &reserved, slot_map);
nir_foreach_variable_with_modes_safe(var, producer, nir_var_shader_out) {
if (!assign_consumer_var_io(producer->info.stage, var, &reserved, slot_map))
nir_foreach_variable_with_modes(var_in, consumer, nir_var_shader_in)
assign_producer_var_io(consumer->info.stage, var_in, &reserved, slot_map);
nir_foreach_variable_with_modes_safe(var_out, producer, nir_var_shader_out) {
if (!assign_consumer_var_io(producer->info.stage, var_out, &reserved, slot_map))
/* this is an output, nothing more needs to be done for it to be dropped */
do_fixup = true;
}
} else {
nir_foreach_variable_with_modes(var, producer, nir_var_shader_out)
assign_producer_var_io(producer->info.stage, var, &reserved, slot_map);
nir_foreach_variable_with_modes_safe(var, consumer, nir_var_shader_in) {
if (!assign_consumer_var_io(consumer->info.stage, var, &reserved, slot_map)) {
nir_foreach_variable_with_modes(var_out, producer, nir_var_shader_out)
assign_producer_var_io(producer->info.stage, var_out, &reserved, slot_map);
nir_foreach_variable_with_modes_safe(var_in, consumer, nir_var_shader_in) {
if (!assign_consumer_var_io(consumer->info.stage, var_in, &reserved, slot_map)) {
do_fixup = true;
/* input needs to be rewritten */
nir_shader_instructions_pass(consumer, rewrite_read_as_0, nir_metadata_dominance, var);
nir_shader_instructions_pass(consumer, rewrite_read_as_0, nir_metadata_dominance, var_in);
}
}
if (consumer->info.stage == MESA_SHADER_FRAGMENT && screen->driver_workarounds.needs_sanitised_layer)
@@ -5191,11 +5192,11 @@ rework_io_vars(nir_shader *nir, nir_variable_mode mode)
}
if (is_arrayed)
vec_type = glsl_array_type(vec_type, 32 /* MAX_PATCH_VERTICES */, glsl_get_explicit_stride(vec_type));
nir_variable *found = find_io_var_with_semantics(nir, mode, mode, s, location, c, is_load);
if (found) {
if (glsl_get_vector_elements(glsl_without_array(found->type)) < glsl_get_vector_elements(glsl_without_array(vec_type))) {
nir_variable *found_var = find_io_var_with_semantics(nir, mode, mode, s, location, c, is_load);
if (found_var) {
if (glsl_get_vector_elements(glsl_without_array(found_var->type)) < glsl_get_vector_elements(glsl_without_array(vec_type))) {
/* enlarge existing vars if necessary */
found->type = vec_type;
found_var->type = vec_type;
}
continue;
}
@@ -5737,8 +5738,8 @@ zink_gfx_shader_free(struct zink_screen *screen, struct zink_shader *shader)
for (unsigned r = 0; r < ARRAY_SIZE(prog->pipelines); r++) {
for (int i = 0; i < ARRAY_SIZE(prog->pipelines[0]); ++i) {
hash_table_foreach(&prog->pipelines[r][i], entry) {
struct zink_gfx_pipeline_cache_entry *pc_entry = entry->data;
hash_table_foreach(&prog->pipelines[r][i], table_entry) {
struct zink_gfx_pipeline_cache_entry *pc_entry = table_entry->data;
util_queue_fence_wait(&pc_entry->fence);
}

View file

@@ -319,10 +319,10 @@ kopper_CreateSwapchain(struct zink_screen *screen, struct kopper_displaytarget *
if (util_queue_is_initialized(&screen->flush_queue))
util_queue_finish(&screen->flush_queue);
simple_mtx_lock(&screen->queue_lock);
VkResult result = VKSCR(QueueWaitIdle)(screen->queue);
VkResult wait_result = VKSCR(QueueWaitIdle)(screen->queue);
simple_mtx_unlock(&screen->queue_lock);
if (result != VK_SUCCESS)
mesa_loge("ZINK: vkQueueWaitIdle failed (%s)", vk_Result_to_str(result));
if (wait_result != VK_SUCCESS)
mesa_loge("ZINK: vkQueueWaitIdle failed (%s)", vk_Result_to_str(wait_result));
error = VKSCR(CreateSwapchainKHR)(screen->dev, &cswap->scci, NULL,
&cswap->swapchain);
}

View file

@@ -516,15 +516,15 @@ get_render_pass(struct zink_context *ctx)
if (!_mesa_hash_table_insert_pre_hashed(ctx->render_pass_cache, hash, &rp->state, rp))
return NULL;
bool found = false;
struct set_entry *entry = _mesa_set_search_or_add(&ctx->render_pass_state_cache, &pstate, &found);
struct set_entry *cache_entry = _mesa_set_search_or_add(&ctx->render_pass_state_cache, &pstate, &found);
struct zink_render_pass_pipeline_state *ppstate;
if (!found) {
entry->key = ralloc(ctx, struct zink_render_pass_pipeline_state);
ppstate = (void*)entry->key;
cache_entry->key = ralloc(ctx, struct zink_render_pass_pipeline_state);
ppstate = (void*)cache_entry->key;
memcpy(ppstate, &pstate, rp_state_size(&pstate));
ppstate->id = ctx->render_pass_state_cache.entries;
}
ppstate = (void*)entry->key;
ppstate = (void*)cache_entry->key;
rp->pipeline_state = ppstate->id;
}
return rp;

View file

@@ -49,8 +49,8 @@ zink_create_vertex_elements_state(struct pipe_context *pctx,
ves->hw_state.hash = _mesa_hash_pointer(ves);
int buffer_map[PIPE_MAX_ATTRIBS];
for (int i = 0; i < ARRAY_SIZE(buffer_map); ++i)
buffer_map[i] = -1;
for (int j = 0; j < ARRAY_SIZE(buffer_map); ++j)
buffer_map[j] = -1;
int num_bindings = 0;
unsigned num_decomposed = 0;
@@ -127,23 +127,23 @@ zink_create_vertex_elements_state(struct pipe_context *pctx,
}
}
assert(num_decomposed + num_elements <= PIPE_MAX_ATTRIBS);
u_foreach_bit(i, ves->decomposed_attrs | ves->decomposed_attrs_without_w) {
const struct pipe_vertex_element *elem = elements + i;
u_foreach_bit(attr_index, ves->decomposed_attrs | ves->decomposed_attrs_without_w) {
const struct pipe_vertex_element *elem = elements + attr_index;
const struct util_format_description *desc = util_format_description(elem->src_format);
unsigned size = 1;
if (size32 & BITFIELD_BIT(i))
if (size32 & BITFIELD_BIT(attr_index))
size = 4;
else if (size16 & BITFIELD_BIT(i))
else if (size16 & BITFIELD_BIT(attr_index))
size = 2;
else
assert(size8 & BITFIELD_BIT(i));
assert(size8 & BITFIELD_BIT(attr_index));
for (unsigned j = 1; j < desc->nr_channels; j++) {
if (screen->info.have_EXT_vertex_input_dynamic_state) {
memcpy(&ves->hw_state.dynattribs[num_elements], &ves->hw_state.dynattribs[i], sizeof(VkVertexInputAttributeDescription2EXT));
memcpy(&ves->hw_state.dynattribs[num_elements], &ves->hw_state.dynattribs[attr_index], sizeof(VkVertexInputAttributeDescription2EXT));
ves->hw_state.dynattribs[num_elements].location = num_elements;
ves->hw_state.dynattribs[num_elements].offset += j * size;
} else {
memcpy(&ves->hw_state.attribs[num_elements], &ves->hw_state.attribs[i], sizeof(VkVertexInputAttributeDescription));
memcpy(&ves->hw_state.attribs[num_elements], &ves->hw_state.attribs[attr_index], sizeof(VkVertexInputAttributeDescription));
ves->hw_state.attribs[num_elements].location = num_elements;
ves->hw_state.attribs[num_elements].offset += j * size;
}
@@ -153,23 +153,23 @@ zink_create_vertex_elements_state(struct pipe_context *pctx,
ves->hw_state.num_bindings = num_bindings;
ves->hw_state.num_attribs = num_elements;
if (screen->info.have_EXT_vertex_input_dynamic_state) {
for (int i = 0; i < num_bindings; ++i) {
ves->hw_state.dynbindings[i].sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT;
ves->hw_state.dynbindings[i].binding = ves->bindings[i].binding;
ves->hw_state.dynbindings[i].inputRate = ves->bindings[i].inputRate;
ves->hw_state.dynbindings[i].stride = strides[i];
if (ves->divisor[i])
ves->hw_state.dynbindings[i].divisor = ves->divisor[i];
for (int j = 0; j < num_bindings; ++j) {
ves->hw_state.dynbindings[j].sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT;
ves->hw_state.dynbindings[j].binding = ves->bindings[j].binding;
ves->hw_state.dynbindings[j].inputRate = ves->bindings[j].inputRate;
ves->hw_state.dynbindings[j].stride = strides[j];
if (ves->divisor[j])
ves->hw_state.dynbindings[j].divisor = ves->divisor[j];
else
ves->hw_state.dynbindings[i].divisor = 1;
ves->hw_state.dynbindings[j].divisor = 1;
}
} else {
for (int i = 0; i < num_bindings; ++i) {
ves->hw_state.b.bindings[i].binding = ves->bindings[i].binding;
ves->hw_state.b.bindings[i].inputRate = ves->bindings[i].inputRate;
if (ves->divisor[i]) {
ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].divisor = ves->divisor[i];
ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].binding = ves->bindings[i].binding;
for (int j = 0; j < num_bindings; ++j) {
ves->hw_state.b.bindings[j].binding = ves->bindings[j].binding;
ves->hw_state.b.bindings[j].inputRate = ves->bindings[j].inputRate;
if (ves->divisor[j]) {
ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].divisor = ves->divisor[j];
ves->hw_state.b.divisors[ves->hw_state.b.divisors_present].binding = ves->bindings[j].binding;
ves->hw_state.b.divisors_present++;
}
}