mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-27 14:50:10 +01:00
v3dv: don't lower vulkan resource index result to scalar
The intrinsic produces a vec2, so let's honor that and avoid the weird lowering to scalar and later reconstruction to vec2 when we find load vulkan descriptor intrinsics. This fixes tests such as dEQP-VK.spirv_assembly.instruction.spirv1p4.opptrequal.null_comparisons_ssbo_equal (which requires that we expose KHR_spirv_1_4), which otherwise produced bad code that tried to access a vec2 from the result of that intrinsic, leading to NIR validation errors. Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11257>
This commit is contained in:
parent
d84cd611eb
commit
21b0a4c80c
4 changed files with 15 additions and 18 deletions
|
|
@ -2838,7 +2838,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
|
|||
case nir_intrinsic_get_ssbo_size:
|
||||
ntq_store_dest(c, &instr->dest, 0,
|
||||
vir_uniform(c, QUNIFORM_GET_SSBO_SIZE,
|
||||
nir_src_as_uint(instr->src[0])));
|
||||
nir_src_comp_as_uint(instr->src[0], 0)));
|
||||
break;
|
||||
|
||||
case nir_intrinsic_get_ubo_size:
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ lower_load(struct v3d_compile *c,
|
|||
nir_builder *b,
|
||||
nir_intrinsic_instr *instr)
|
||||
{
|
||||
uint32_t index = nir_src_as_uint(instr->src[0]);
|
||||
uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);
|
||||
|
||||
nir_intrinsic_op op;
|
||||
if (instr->intrinsic == nir_intrinsic_load_ubo) {
|
||||
|
|
@ -75,7 +75,7 @@ lower_store(struct v3d_compile *c,
|
|||
nir_builder *b,
|
||||
nir_intrinsic_instr *instr)
|
||||
{
|
||||
uint32_t index = nir_src_as_uint(instr->src[1]);
|
||||
uint32_t index = nir_src_comp_as_uint(instr->src[1], 0);
|
||||
rewrite_offset(b, instr, index, 2, nir_intrinsic_get_ssbo_size);
|
||||
}
|
||||
|
||||
|
|
@ -84,7 +84,7 @@ lower_atomic(struct v3d_compile *c,
|
|||
nir_builder *b,
|
||||
nir_intrinsic_instr *instr)
|
||||
{
|
||||
uint32_t index = nir_src_as_uint(instr->src[0]);
|
||||
uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);
|
||||
rewrite_offset(b, instr, index, 1, nir_intrinsic_get_ssbo_size);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1410,12 +1410,13 @@ v3d_attempt_compile(struct v3d_compile *c)
|
|||
|
||||
if (c->key->robust_buffer_access) {
|
||||
/* v3d_nir_lower_robust_buffer_access assumes constant buffer
|
||||
* indices on ubo/ssbo intrinsics so run a copy propagation pass
|
||||
* before we run the lowering to warrant this. We also want to run
|
||||
* the lowering before v3d_optimize to clean-up redundant
|
||||
* get_buffer_size calls produced in the pass.
|
||||
* indices on ubo/ssbo intrinsics so run copy propagation and
|
||||
* constant folding passes before we run the lowering to warrant
|
||||
* this. We also want to run the lowering before v3d_optimize to
|
||||
* clean-up redundant get_buffer_size calls produced in the pass.
|
||||
*/
|
||||
NIR_PASS_V(c->s, nir_copy_prop);
|
||||
NIR_PASS_V(c->s, nir_opt_constant_folding);
|
||||
NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -658,13 +658,11 @@ lower_vulkan_resource_index(nir_builder *b,
|
|||
}
|
||||
|
||||
/* Since we use the deref pass, both vulkan_resource_index and
|
||||
* vulkan_load_descriptor returns a vec2. But for the index the backend
|
||||
* expect just one scalar (like with get_ssbo_size), so lets return here
|
||||
* just it. Then on load_descriptor we would recreate the vec2, keeping the
|
||||
* second component (unused right now) to zero.
|
||||
* vulkan_load_descriptor return a vec2 providing an index and
|
||||
* offset. Our backend compiler only cares about the index part.
|
||||
*/
|
||||
nir_ssa_def_rewrite_uses(&instr->dest.ssa,
|
||||
nir_imm_int(b, index));
|
||||
nir_imm_ivec2(b, index, 0));
|
||||
nir_instr_remove(&instr->instr);
|
||||
}
|
||||
|
||||
|
|
@ -907,12 +905,10 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
|
|||
return true;
|
||||
|
||||
case nir_intrinsic_load_vulkan_descriptor: {
|
||||
/* We are not using it, as loading the descriptor happens as part of the
|
||||
* load/store instruction, so the simpler is just doing a no-op. We just
|
||||
* lower the desc back to a vec2, as it is what load_ssbo/ubo expects.
|
||||
/* Loading the descriptor happens as part of load/store instructions,
|
||||
* so for us this is a no-op.
|
||||
*/
|
||||
nir_ssa_def *desc = nir_vec2(b, instr->src[0].ssa, nir_imm_int(b, 0));
|
||||
nir_ssa_def_rewrite_uses(&instr->dest.ssa, desc);
|
||||
nir_ssa_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa);
|
||||
nir_instr_remove(&instr->instr);
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue