mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-25 13:00:09 +01:00
build: avoid redefining unreachable() which is standard in C23
In the C23 standard unreachable() is now a predefined function-like macro in <stddef.h>. See https://android.googlesource.com/platform/bionic/+/HEAD/docs/c23.md#is-now-a-predefined-function_like-macro-in. And this causes build errors when building for C23: ----------------------------------------------------------------------- In file included from ../src/util/log.h:30, from ../src/util/log.c:30: ../src/util/macros.h:123:9: warning: "unreachable" redefined 123 | #define unreachable(str) \ | ^~~~~~~~~~~ In file included from ../src/util/macros.h:31: /usr/lib/gcc/x86_64-linux-gnu/14/include/stddef.h:456:9: note: this is the location of the previous definition 456 | #define unreachable() (__builtin_unreachable ()) | ^~~~~~~~~~~ ----------------------------------------------------------------------- So don't redefine it with the same name, but use the name UNREACHABLE() to also signify it's a macro. Using a different name also makes sense because the behavior of the macro was extending the one of __builtin_unreachable() anyway, and it also had a different signature, accepting one argument, compared to the standard unreachable() with no arguments. This change improves the chances of building mesa with the C23 standard, which for instance is the default in recent AOSP versions. All the instances of the macro, including the definition, were updated with the following command line: git grep -l '[^_]unreachable(' -- "src/**" | sort | uniq | \ while read file; \ do \ sed -e 's/\([^_]\)unreachable(/\1UNREACHABLE(/g' -i "$file"; \ done && \ sed -e 's/#undef unreachable/#undef UNREACHABLE/g' -i src/intel/isl/isl_aux_info.c Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36437>
This commit is contained in:
parent
b4c7d3a08e
commit
ddf2aa3a4d
1079 changed files with 3741 additions and 3741 deletions
|
|
@ -138,7 +138,7 @@ ac_build_gfx6_fmask_descriptor(const enum amd_gfx_level gfx_level, const struct
|
|||
num_format = V_008F14_IMG_NUM_FORMAT_FMASK_64_16_8;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid nr_samples");
|
||||
UNREACHABLE("invalid nr_samples");
|
||||
}
|
||||
} else {
|
||||
switch (FMASK(state->num_samples, state->num_storage_samples)) {
|
||||
|
|
@ -182,7 +182,7 @@ ac_build_gfx6_fmask_descriptor(const enum amd_gfx_level gfx_level, const struct
|
|||
data_format = V_008F14_IMG_DATA_FORMAT_FMASK64_S16_F8;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid nr_samples");
|
||||
UNREACHABLE("invalid nr_samples");
|
||||
}
|
||||
num_format = V_008F14_IMG_NUM_FORMAT_UINT;
|
||||
}
|
||||
|
|
@ -282,7 +282,7 @@ ac_build_gfx10_fmask_descriptor(const enum amd_gfx_level gfx_level, const struct
|
|||
format = V_008F0C_GFX10_FORMAT_FMASK64_S16_F8;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid nr_samples");
|
||||
UNREACHABLE("invalid nr_samples");
|
||||
}
|
||||
#undef FMASK
|
||||
|
||||
|
|
|
|||
|
|
@ -1740,7 +1740,7 @@ ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
|
|||
max_hs_out_vram_dwords_enum = V_03093C_X_1K_DWORDS;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid TCS workgroup size");
|
||||
UNREACHABLE("invalid TCS workgroup size");
|
||||
}
|
||||
|
||||
/* Vega10 should limit num_workgroups to 508 (127 per SE)
|
||||
|
|
@ -2247,7 +2247,7 @@ int ac_get_gs_table_depth(enum amd_gfx_level gfx_level, enum radeon_family famil
|
|||
case CHIP_VEGAM:
|
||||
return 32;
|
||||
default:
|
||||
unreachable("Unknown GPU");
|
||||
UNREACHABLE("Unknown GPU");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2152,7 +2152,7 @@ void ac_parse_ib_chunk(struct ac_ib_parser *ib)
|
|||
else if (ib->ip_type == AMD_IP_VCN_DEC || ib->ip_type == AMD_IP_VCN_ENC)
|
||||
parse_vcn_ib(memf, &tmp_ib);
|
||||
else
|
||||
unreachable("unsupported IP type");
|
||||
UNREACHABLE("unsupported IP type");
|
||||
|
||||
u_memstream_close(&mem);
|
||||
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ pairs_packed_opcode_to_regular(unsigned opcode)
|
|||
case PKT3_SET_SH_REG_PAIRS_PACKED:
|
||||
return PKT3_SET_SH_REG;
|
||||
default:
|
||||
unreachable("invalid packed opcode");
|
||||
UNREACHABLE("invalid packed opcode");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -362,7 +362,7 @@ static enum sqtt_gfxip_level ac_gfx_level_to_sqtt_gfxip_level(enum amd_gfx_level
|
|||
case GFX12:
|
||||
return SQTT_GFXIP_LEVEL_GFXIP_12;
|
||||
default:
|
||||
unreachable("Invalid gfx level");
|
||||
UNREACHABLE("Invalid gfx level");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -394,7 +394,7 @@ static enum sqtt_memory_type ac_vram_type_to_sqtt_memory_type(uint32_t vram_type
|
|||
case AMD_VRAM_TYPE_LPDDR5:
|
||||
return SQTT_MEMORY_TYPE_LPDDR5;
|
||||
default:
|
||||
unreachable("Invalid vram type");
|
||||
UNREACHABLE("Invalid vram type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -712,7 +712,7 @@ static enum sqtt_version ac_gfx_level_to_sqtt_version(enum amd_gfx_level gfx_lev
|
|||
case GFX12:
|
||||
return SQTT_VERSION_3_3;
|
||||
default:
|
||||
unreachable("Invalid gfx level");
|
||||
UNREACHABLE("Invalid gfx level");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -876,7 +876,7 @@ static enum elf_gfxip_level ac_gfx_level_to_elf_gfxip_level(enum amd_gfx_level g
|
|||
case GFX12:
|
||||
return EF_AMDGPU_MACH_AMDGCN_GFX1200;
|
||||
default:
|
||||
unreachable("Invalid gfx level");
|
||||
UNREACHABLE("Invalid gfx level");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -469,7 +469,7 @@ static bool apply_relocs(const struct ac_rtld_upload_info *u, unsigned part_idx,
|
|||
*(uint64_t *)dst_ptr = util_cpu_to_le64(abs - va);
|
||||
break;
|
||||
default:
|
||||
unreachable("bad r_type");
|
||||
UNREACHABLE("bad r_type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -116,7 +116,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
if (gfx_level >= GFX11) {
|
||||
switch (dfmt) {
|
||||
default:
|
||||
unreachable("bad dfmt");
|
||||
UNREACHABLE("bad dfmt");
|
||||
case V_008F0C_BUF_DATA_FORMAT_INVALID:
|
||||
return V_008F0C_GFX11_FORMAT_INVALID;
|
||||
|
||||
|
|
@ -131,7 +131,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_8_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_8_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -149,7 +149,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_8_8_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_8_8_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -167,7 +167,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_8_8_8_8_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_8_8_8_8_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -185,7 +185,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_16_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_16_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -205,7 +205,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_16_16_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_16_16_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -225,7 +225,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_16_16_16_16_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_16_16_16_16_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -237,7 +237,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_DATA_FORMAT_32:
|
||||
switch (nfmt) {
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_32_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -249,7 +249,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_DATA_FORMAT_32_32:
|
||||
switch (nfmt) {
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_32_32_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -261,7 +261,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_DATA_FORMAT_32_32_32:
|
||||
switch (nfmt) {
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_32_32_32_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -273,7 +273,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
|
||||
switch (nfmt) {
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_32_32_32_32_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -293,7 +293,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_NUM_FORMAT_SSCALED:
|
||||
return V_008F0C_GFX11_FORMAT_2_10_10_10_SSCALED;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
return V_008F0C_GFX11_FORMAT_2_10_10_10_UINT;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -303,7 +303,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
case V_008F0C_BUF_DATA_FORMAT_10_11_11:
|
||||
switch (nfmt) {
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_FLOAT:
|
||||
return V_008F0C_GFX11_FORMAT_10_11_11_FLOAT;
|
||||
}
|
||||
|
|
@ -312,7 +312,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
unsigned format;
|
||||
switch (dfmt) {
|
||||
default:
|
||||
unreachable("bad dfmt");
|
||||
UNREACHABLE("bad dfmt");
|
||||
case V_008F0C_BUF_DATA_FORMAT_INVALID:
|
||||
format = V_008F0C_GFX10_FORMAT_INVALID;
|
||||
break;
|
||||
|
|
@ -373,7 +373,7 @@ unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsi
|
|||
format -= 1;
|
||||
break;
|
||||
default:
|
||||
unreachable("bad nfmt");
|
||||
UNREACHABLE("bad nfmt");
|
||||
case V_008F0C_BUF_NUM_FORMAT_UINT:
|
||||
break;
|
||||
case V_008F0C_BUF_NUM_FORMAT_SINT:
|
||||
|
|
@ -580,7 +580,7 @@ enum ac_image_dim ac_get_sampler_dim(enum amd_gfx_level gfx_level, enum glsl_sam
|
|||
case GLSL_SAMPLER_DIM_SUBPASS_MS:
|
||||
return ac_image_2darraymsaa;
|
||||
default:
|
||||
unreachable("bad sampler dim");
|
||||
UNREACHABLE("bad sampler dim");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -656,7 +656,7 @@ uint16_t ac_get_ps_iter_mask(unsigned ps_iter_samples)
|
|||
case 4: return 0x11;
|
||||
case 8: return 0x01;
|
||||
default:
|
||||
unreachable("invalid sample count");
|
||||
UNREACHABLE("invalid sample count");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -885,7 +885,7 @@ unsigned ac_compute_lshs_workgroup_size(enum amd_gfx_level gfx_level, gl_shader_
|
|||
else if (stage == MESA_SHADER_TESS_CTRL)
|
||||
return hs_workgroup_size;
|
||||
else
|
||||
unreachable("invalid LSHS shader stage");
|
||||
UNREACHABLE("invalid LSHS shader stage");
|
||||
}
|
||||
|
||||
unsigned ac_compute_ngg_workgroup_size(unsigned es_verts, unsigned gs_inst_prims,
|
||||
|
|
|
|||
|
|
@ -2872,7 +2872,7 @@ struct ac_pm4_state *ac_emulate_clear_state(const struct radeon_info *info)
|
|||
} else if (info->gfx_level == GFX9) {
|
||||
gfx9_emulate_clear_state(pm4);
|
||||
} else {
|
||||
unreachable("unimplemented");
|
||||
UNREACHABLE("unimplemented");
|
||||
}
|
||||
|
||||
ac_pm4_finalize(pm4);
|
||||
|
|
@ -3066,7 +3066,7 @@ struct ac_pm4_state *ac_create_shadowing_ib_preamble(const struct radeon_info *i
|
|||
ac_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
|
||||
ac_pm4_cmd_add(pm4, 0);
|
||||
} else {
|
||||
unreachable("invalid chip");
|
||||
UNREACHABLE("invalid chip");
|
||||
}
|
||||
|
||||
ac_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ unsigned ac_pipe_config_to_num_pipes(unsigned pipe_config)
|
|||
case V_009910_ADDR_SURF_P16_32x32_16x16:
|
||||
return 16;
|
||||
default:
|
||||
unreachable("invalid pipe_config");
|
||||
UNREACHABLE("invalid pipe_config");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -774,7 +774,7 @@ static unsigned bpe_to_format(const struct radeon_surf *surf)
|
|||
*/
|
||||
return ADDR_FMT_BC3;
|
||||
default:
|
||||
unreachable("invalid compressed bpe");
|
||||
UNREACHABLE("invalid compressed bpe");
|
||||
}
|
||||
} else if (surf->blk_w == 5 && surf->blk_h == 4)
|
||||
return ADDR_FMT_ASTC_5x4;
|
||||
|
|
@ -823,7 +823,7 @@ static unsigned bpe_to_format(const struct radeon_surf *surf)
|
|||
assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
|
||||
return ADDR_FMT_32_32_32_32;
|
||||
default:
|
||||
unreachable("invalid bpe");
|
||||
UNREACHABLE("invalid bpe");
|
||||
}
|
||||
}
|
||||
return ADDR_FMT_INVALID;
|
||||
|
|
@ -1944,7 +1944,7 @@ static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_m
|
|||
sw_mode == ADDR_SW_256KB_Z_X || sw_mode == ADDR_SW_256KB_R_X;
|
||||
|
||||
default:
|
||||
unreachable("invalid gfx_level");
|
||||
UNREACHABLE("invalid gfx_level");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2053,7 +2053,7 @@ static bool gfx9_is_dcc_supported_by_DCN(const struct radeon_info *info,
|
|||
(surf->u.gfx9.color.dcc.independent_64B_blocks &&
|
||||
surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
|
||||
default:
|
||||
unreachable("unhandled chip");
|
||||
UNREACHABLE("unhandled chip");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
@ -4031,7 +4031,7 @@ static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
|
|||
block_size_log2 = 18;
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled swizzle mode");
|
||||
UNREACHABLE("unhandled swizzle mode");
|
||||
}
|
||||
|
||||
return 1 << ((block_size_log2 >> 1) - (bpe_log2 >> 1));
|
||||
|
|
@ -4059,7 +4059,7 @@ static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
|
|||
block_size_log2 = 18;
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled swizzle mode");
|
||||
UNREACHABLE("unhandled swizzle mode");
|
||||
}
|
||||
|
||||
if (info->gfx_level >= GFX10) {
|
||||
|
|
@ -4086,7 +4086,7 @@ static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
|
|||
return 8 * surf->u.legacy.bankw * surf->u.legacy.mtilea *
|
||||
ac_pipe_config_to_num_pipes(surf->u.legacy.pipe_config);
|
||||
default:
|
||||
unreachable("unhandled surf mode");
|
||||
UNREACHABLE("unhandled surf mode");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -4192,7 +4192,7 @@ uint64_t ac_surface_get_plane_offset(enum amd_gfx_level gfx_level,
|
|||
assert(!layer);
|
||||
return surf->meta_offset;
|
||||
default:
|
||||
unreachable("Invalid plane index");
|
||||
UNREACHABLE("Invalid plane index");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4213,7 +4213,7 @@ uint64_t ac_surface_get_plane_stride(enum amd_gfx_level gfx_level,
|
|||
case 2:
|
||||
return surf->u.gfx9.color.dcc_pitch_max + 1;
|
||||
default:
|
||||
unreachable("Invalid plane index");
|
||||
UNREACHABLE("Invalid plane index");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4229,7 +4229,7 @@ uint64_t ac_surface_get_plane_size(const struct radeon_surf *surf,
|
|||
case 2:
|
||||
return surf->meta_size;
|
||||
default:
|
||||
unreachable("Invalid plane index");
|
||||
UNREACHABLE("Invalid plane index");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4301,7 +4301,7 @@ ac_surface_addr_from_coord(struct ac_addrlib *addrlib, const struct radeon_info
|
|||
else if (info->gfx_level >= GFX9)
|
||||
return gfx9_surface_addr_from_coord(addrlib, info, surf, surf_info, level, x, y, layer, is_3d);
|
||||
else
|
||||
unreachable("invalid gfx_level");
|
||||
UNREACHABLE("invalid gfx_level");
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
|||
|
|
@ -315,7 +315,7 @@ static void run_dcc_address_test(const char *name, const struct radeon_info *inf
|
|||
swizzle_modes[num_swizzle_modes++] = ADDR_SW_256KB_R_X;
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled gfx level");
|
||||
UNREACHABLE("unhandled gfx level");
|
||||
}
|
||||
|
||||
if (full) {
|
||||
|
|
@ -493,7 +493,7 @@ static void run_htile_address_test(const char *name, const struct radeon_info *i
|
|||
swizzle_modes[num_swizzle_modes++] = ADDR_SW_256KB_Z_X;
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled gfx level");
|
||||
UNREACHABLE("unhandled gfx level");
|
||||
}
|
||||
|
||||
/* The test coverage is reduced for Gitlab CI because it timeouts. */
|
||||
|
|
|
|||
|
|
@ -325,7 +325,7 @@ static void test_modifier(const struct radeon_info *info,
|
|||
block_size_bits = 18;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid swizzle mode");
|
||||
UNREACHABLE("invalid swizzle mode");
|
||||
}
|
||||
} else {
|
||||
switch (surf.u.gfx9.swizzle_mode) {
|
||||
|
|
@ -365,7 +365,7 @@ static void test_modifier(const struct radeon_info *info,
|
|||
block_size_bits = 18;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid swizzle mode");
|
||||
UNREACHABLE("invalid swizzle mode");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ const char *ac_get_family_name(enum radeon_family family)
|
|||
CASE(GFX1201);
|
||||
#undef CASE
|
||||
default:
|
||||
unreachable("Unknown GPU family");
|
||||
UNREACHABLE("Unknown GPU family");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -332,7 +332,7 @@ ac_nir_varying_expression_max_cost(nir_shader *producer, nir_shader *consumer)
|
|||
return 12;
|
||||
|
||||
default:
|
||||
unreachable("unexpected shader stage");
|
||||
UNREACHABLE("unexpected shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -686,7 +686,7 @@ enum gl_access_qualifier ac_nir_get_mem_access_flags(const nir_intrinsic_instr *
|
|||
break;
|
||||
|
||||
default:
|
||||
unreachable("unexpected store instruction");
|
||||
UNREACHABLE("unexpected store instruction");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -762,7 +762,7 @@ summarize_repack(nir_builder *b, nir_def *packed_counts, bool mask_lane_id, unsi
|
|||
return nir_msad_4x8(b, nir_unpack_64_2x32_split_y(b, sad_op), nir_imm_int(b, 0), sum);
|
||||
}
|
||||
} else {
|
||||
unreachable("Unimplemented NGG wave count");
|
||||
UNREACHABLE("Unimplemented NGG wave count");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -527,7 +527,7 @@ ac_nir_cull_primitive(nir_builder *b,
|
|||
return ac_nir_cull_line(b, skip_viewport_state_culling, initially_accepted, pos, &w_info,
|
||||
accept_func, state);
|
||||
} else {
|
||||
unreachable("point culling not implemented");
|
||||
UNREACHABLE("point culling not implemented");
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ static unsigned get_coord_components(enum glsl_sampler_dim dim, bool is_array)
|
|||
case GLSL_SAMPLER_DIM_3D:
|
||||
return 3;
|
||||
default:
|
||||
unreachable("unexpected sampler type");
|
||||
UNREACHABLE("unexpected sampler type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -402,7 +402,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
|
|||
return true;
|
||||
|
||||
default:
|
||||
unreachable("shouldn't get here");
|
||||
UNREACHABLE("shouldn't get here");
|
||||
}
|
||||
} else if (instr->type == nir_instr_type_tex) {
|
||||
nir_tex_instr *tex = nir_instr_as_tex(instr);
|
||||
|
|
@ -468,7 +468,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
|
|||
case nir_tex_src_texture_offset:
|
||||
case nir_tex_src_sampler_offset:
|
||||
case nir_tex_src_plane:
|
||||
unreachable("unsupported texture src");
|
||||
UNREACHABLE("unsupported texture src");
|
||||
|
||||
default:;
|
||||
}
|
||||
|
|
@ -493,7 +493,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
|
|||
return true;
|
||||
|
||||
default:
|
||||
unreachable("shouldn't get here");
|
||||
UNREACHABLE("shouldn't get here");
|
||||
}
|
||||
break;
|
||||
|
||||
|
|
|
|||
|
|
@ -91,7 +91,7 @@ load_subgroup_id_lowered(lower_intrinsics_to_args_state *s, nir_builder *b)
|
|||
}
|
||||
return nir_ushr_imm(b, sgpr_local_invocation_index, util_logbase2(s->wave_size));
|
||||
} else {
|
||||
unreachable("unimplemented for LS");
|
||||
UNREACHABLE("unimplemented for LS");
|
||||
}
|
||||
} else if (s->hw_stage == AC_HW_LEGACY_GEOMETRY_SHADER ||
|
||||
s->hw_stage == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
|
||||
|
|
@ -281,7 +281,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
replacement = ac_nir_load_arg_upper_bound(b, s->args, s->args->gs_invocation_id, 31);
|
||||
}
|
||||
} else {
|
||||
unreachable("unexpected shader stage");
|
||||
UNREACHABLE("unexpected shader stage");
|
||||
}
|
||||
break;
|
||||
case nir_intrinsic_load_sample_id:
|
||||
|
|
@ -368,7 +368,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
else if (s->args->gs_wave_id.used)
|
||||
replacement = ac_nir_load_arg(b, s->args, s->args->gs_wave_id);
|
||||
else
|
||||
unreachable("Shader doesn't have GS wave ID.");
|
||||
UNREACHABLE("Shader doesn't have GS wave ID.");
|
||||
break;
|
||||
case nir_intrinsic_overwrite_vs_arguments_amd:
|
||||
s->vertex_id = intrin->src[0].ssa;
|
||||
|
|
@ -410,7 +410,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
}
|
||||
}
|
||||
} else {
|
||||
unreachable("invalid stage");
|
||||
UNREACHABLE("invalid stage");
|
||||
}
|
||||
break;
|
||||
case nir_intrinsic_load_primitive_id:
|
||||
|
|
@ -427,7 +427,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
else
|
||||
replacement = ac_nir_load_arg(b, s->args, s->args->gs_prim_id); /* NGG */
|
||||
} else {
|
||||
unreachable("invalid stage");
|
||||
UNREACHABLE("invalid stage");
|
||||
}
|
||||
break;
|
||||
case nir_intrinsic_load_tess_coord: {
|
||||
|
|
|
|||
|
|
@ -176,7 +176,7 @@ ac_nir_lower_legacy_gs(nir_shader *nir, ac_nir_lower_legacy_gs_options *options,
|
|||
num_vertices_per_primitive = 3;
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid GS output primitive.");
|
||||
UNREACHABLE("Invalid GS output primitive.");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1087,7 +1087,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
|
|||
if (s->deferred.uses_tess_primitive_id)
|
||||
nir_store_var(b, repacked_variables[2], nir_load_primitive_id(b), 0x1u);
|
||||
} else {
|
||||
unreachable("Should be VS or TES.");
|
||||
UNREACHABLE("Should be VS or TES.");
|
||||
}
|
||||
}
|
||||
nir_pop_if(b, if_es_thread);
|
||||
|
|
@ -1283,7 +1283,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
|
|||
|
||||
nir_overwrite_tes_arguments_amd(b, u, v, prim_id, rel_patch_id);
|
||||
} else {
|
||||
unreachable("Should be VS or TES.");
|
||||
UNREACHABLE("Should be VS or TES.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -858,7 +858,7 @@ ac_nir_lower_ngg_gs(nir_shader *shader, const ac_nir_lower_ngg_options *options,
|
|||
else if (shader->info.gs.output_primitive == MESA_PRIM_TRIANGLE_STRIP)
|
||||
state.num_vertices_per_primitive = 3;
|
||||
else
|
||||
unreachable("Invalid GS output primitive.");
|
||||
UNREACHABLE("Invalid GS output primitive.");
|
||||
|
||||
/* Extract the full control flow. It is going to be wrapped in an if statement. */
|
||||
nir_cf_list extracted;
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ ms_get_out_layout_part(unsigned location,
|
|||
}
|
||||
}
|
||||
|
||||
unreachable("Couldn't figure out mesh shader output mode.");
|
||||
UNREACHABLE("Couldn't figure out mesh shader output mode.");
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -357,7 +357,7 @@ ms_store_arrayed_output(nir_builder *b,
|
|||
nir_store_var(b, s->out_variables[idx], val, 0x1);
|
||||
}
|
||||
} else {
|
||||
unreachable("Invalid MS output mode for store");
|
||||
UNREACHABLE("Invalid MS output mode for store");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -441,7 +441,7 @@ ms_load_arrayed_output(nir_builder *b,
|
|||
}
|
||||
return nir_vec(b, arr, num_components);
|
||||
} else {
|
||||
unreachable("Invalid MS output mode for load");
|
||||
UNREACHABLE("Invalid MS output mode for load");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -513,7 +513,7 @@ lower_ms_intrinsic(nir_builder *b, nir_instr *instr, void *state)
|
|||
case nir_intrinsic_set_vertex_and_primitive_count:
|
||||
return lower_ms_set_vertex_and_primitive_count(b, intrin, s);
|
||||
default:
|
||||
unreachable("Not a lowerable mesh shader intrinsic.");
|
||||
UNREACHABLE("Not a lowerable mesh shader intrinsic.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1310,7 +1310,7 @@ ms_calculate_output_layout(const struct radeon_info *hw_info, unsigned api_share
|
|||
else if (l.lds.vtx_attr.mask)
|
||||
ms_move_output(&l.lds.vtx_attr, &l.scratch_ring.vtx_attr);
|
||||
else
|
||||
unreachable("API shader uses too much shared memory.");
|
||||
UNREACHABLE("API shader uses too much shared memory.");
|
||||
|
||||
ms_calculate_arrayed_output_layout(&l, max_vertices, max_primitives);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -424,7 +424,7 @@ emit_ps_color_export(nir_builder *b, lower_ps_state *s, unsigned output_index, u
|
|||
pack_op = nir_op_pack_snorm_2x16;
|
||||
break;
|
||||
default:
|
||||
unreachable("unsupported color export format");
|
||||
UNREACHABLE("unsupported color export format");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -637,7 +637,7 @@ export_ps_outputs(nir_builder *b, lower_ps_state *s)
|
|||
case BITFIELD_RANGE(0, 2):
|
||||
break;
|
||||
default:
|
||||
unreachable("unexpected number of color outputs for dual source blending");
|
||||
UNREACHABLE("unexpected number of color outputs for dual source blending");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -225,7 +225,7 @@ lower_query_size(nir_builder *b, nir_def *desc, nir_src *lod,
|
|||
result = nir_vec3(b, width, height, depth);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid sampler dim");
|
||||
UNREACHABLE("invalid sampler dim");
|
||||
}
|
||||
|
||||
return handle_null_desc(b, desc, result);
|
||||
|
|
@ -352,7 +352,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
|
|||
result = query_samples(b, desc, tex->sampler_dim, gfx_level);
|
||||
break;
|
||||
default:
|
||||
unreachable("shouldn't get here");
|
||||
UNREACHABLE("shouldn't get here");
|
||||
}
|
||||
break;
|
||||
|
||||
|
|
|
|||
|
|
@ -271,7 +271,7 @@ lower_task_intrinsics(nir_builder *b,
|
|||
case nir_intrinsic_launch_mesh_workgroups:
|
||||
return lower_task_launch_mesh_workgroups(b, intrin, s);
|
||||
default:
|
||||
unreachable("unsupported task shader intrinsic");
|
||||
UNREACHABLE("unsupported task shader intrinsic");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -338,7 +338,7 @@ lower_mesh_intrinsics(nir_builder *b,
|
|||
if (intrin->intrinsic == nir_intrinsic_load_task_payload)
|
||||
return lower_taskmesh_payload_load(b, intrin, s);
|
||||
else
|
||||
unreachable("unsupported mesh shader intrinsic");
|
||||
UNREACHABLE("unsupported mesh shader intrinsic");
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
|
|||
|
|
@ -804,7 +804,7 @@ lower_hs_output_access(nir_builder *b,
|
|||
update_hs_barrier(intrin, st);
|
||||
return NIR_LOWER_INSTR_PROGRESS;
|
||||
} else {
|
||||
unreachable("intrinsic not supported by lower_hs_output_access");
|
||||
UNREACHABLE("intrinsic not supported by lower_hs_output_access");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -474,7 +474,7 @@ move_coords_from_divergent_cf(struct move_tex_coords_state *state, nir_function_
|
|||
break;
|
||||
}
|
||||
case nir_cf_node_function:
|
||||
unreachable("Invalid cf type");
|
||||
UNREACHABLE("Invalid cf type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -419,7 +419,7 @@ ac_prepare_cs_clear_copy_buffer(const struct ac_cs_clear_copy_buffer_options *op
|
|||
break;
|
||||
|
||||
case GFX12:
|
||||
unreachable("cp_sdma_ge_use_system_memory_scope should be true, so we should never get here");
|
||||
UNREACHABLE("cp_sdma_ge_use_system_memory_scope should be true, so we should never get here");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -405,7 +405,7 @@ emit_vintrp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Inst
|
|||
} else if (ctx.gfx_level >= GFX10) {
|
||||
encoding = (0b110101 << 26);
|
||||
} else {
|
||||
unreachable("Unknown gfx_level.");
|
||||
UNREACHABLE("Unknown gfx_level.");
|
||||
}
|
||||
|
||||
unsigned opsel = instr->opcode == aco_opcode::v_interp_p2_hi_f16 ? 0x8 : 0;
|
||||
|
|
@ -1085,7 +1085,7 @@ emit_vop3_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instru
|
|||
} else if (ctx.gfx_level >= GFX10) {
|
||||
encoding = (0b110101 << 26);
|
||||
} else {
|
||||
unreachable("Unknown gfx_level.");
|
||||
UNREACHABLE("Unknown gfx_level.");
|
||||
}
|
||||
|
||||
if (ctx.gfx_level <= GFX7) {
|
||||
|
|
@ -1136,7 +1136,7 @@ emit_vop3p_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instr
|
|||
} else if (ctx.gfx_level >= GFX10) {
|
||||
encoding = (0b110011 << 26);
|
||||
} else {
|
||||
unreachable("Unknown gfx_level.");
|
||||
UNREACHABLE("Unknown gfx_level.");
|
||||
}
|
||||
|
||||
encoding |= opcode << 16;
|
||||
|
|
@ -1383,7 +1383,7 @@ emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* inst
|
|||
case Format::PSEUDO:
|
||||
case Format::PSEUDO_BARRIER:
|
||||
if (instr->opcode != aco_opcode::p_unit_test)
|
||||
unreachable("Pseudo instructions should be lowered before assembly.");
|
||||
UNREACHABLE("Pseudo instructions should be lowered before assembly.");
|
||||
break;
|
||||
default:
|
||||
if (instr->isDPP16()) {
|
||||
|
|
@ -1399,7 +1399,7 @@ emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* inst
|
|||
} else if (instr->isSDWA()) {
|
||||
emit_sdwa_instruction(ctx, out, instr);
|
||||
} else {
|
||||
unreachable("unimplemented instruction format");
|
||||
UNREACHABLE("unimplemented instruction format");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -358,7 +358,7 @@ public:
|
|||
case s_lshl:
|
||||
return aco_opcode::s_lshl_b32;
|
||||
default:
|
||||
unreachable("Unsupported wave specific opcode.");
|
||||
UNREACHABLE("Unsupported wave specific opcode.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -728,7 +728,7 @@ add_branch_code(exec_ctx& ctx, Block* block)
|
|||
bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1],
|
||||
block->linear_succs[0]);
|
||||
} else {
|
||||
unreachable("unknown/invalid block type");
|
||||
UNREACHABLE("unknown/invalid block type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -838,7 +838,7 @@ get_reduction_identity(ReduceOp op, unsigned idx)
|
|||
case fmax16: return 0xfc00u; /* negative infinity */
|
||||
case fmax32: return 0xff800000u; /* negative infinity */
|
||||
case fmax64: return idx ? 0xfff00000u : 0u; /* negative infinity */
|
||||
default: unreachable("Invalid reduction operation"); break;
|
||||
default: UNREACHABLE("Invalid reduction operation"); break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -789,7 +789,7 @@ public:
|
|||
case 255:
|
||||
return (signext && (data_.i & 0x80000000u) ? 0xffffffff00000000ull : 0ull) | data_.i;
|
||||
}
|
||||
unreachable("invalid register for 64-bit constant");
|
||||
UNREACHABLE("invalid register for 64-bit constant");
|
||||
} else {
|
||||
return data_.i;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -483,7 +483,7 @@ lower_branch_instruction(branch_ctx& ctx, Block& block)
|
|||
bld.sopp(aco_opcode::s_cbranch_scc0, target);
|
||||
}
|
||||
break;
|
||||
default: unreachable("Unknown Pseudo branch instruction!");
|
||||
default: UNREACHABLE("Unknown Pseudo branch instruction!");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -952,7 +952,7 @@ emit_reduction(lower_context* ctx, aco_opcode op, ReduceOp reduce_op, unsigned c
|
|||
false, identity);
|
||||
}
|
||||
break;
|
||||
default: unreachable("Invalid reduction mode");
|
||||
default: UNREACHABLE("Invalid reduction mode");
|
||||
}
|
||||
|
||||
if (op == aco_opcode::p_reduce) {
|
||||
|
|
@ -1418,7 +1418,7 @@ do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool* pres
|
|||
} else if (def.regClass().is_subdword()) {
|
||||
bld.vop1_sdwa(aco_opcode::v_mov_b32, def, op);
|
||||
} else {
|
||||
unreachable("unsupported copy");
|
||||
UNREACHABLE("unsupported copy");
|
||||
}
|
||||
|
||||
did_copy = true;
|
||||
|
|
|
|||
|
|
@ -226,7 +226,7 @@ struct InstrPred {
|
|||
case Format::EXP:
|
||||
case Format::SOPP:
|
||||
case Format::PSEUDO_BRANCH:
|
||||
case Format::PSEUDO_BARRIER: unreachable("unsupported instruction format");
|
||||
case Format::PSEUDO_BARRIER: UNREACHABLE("unsupported instruction format");
|
||||
default: return true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3166,7 +3166,7 @@ apply_load_extract(opt_ctx& ctx, aco_ptr<Instruction>& extract)
|
|||
else if (bits_loaded == 16 && load->isSMEM() && !is_s_buffer)
|
||||
load->opcode = sign_ext ? aco_opcode::s_load_sshort : aco_opcode::s_load_ushort;
|
||||
else
|
||||
unreachable("Forgot to add opcode above.");
|
||||
UNREACHABLE("Forgot to add opcode above.");
|
||||
|
||||
if (dst_bitsize <= 16 && ctx.program->gfx_level >= GFX9) {
|
||||
switch (load->opcode) {
|
||||
|
|
@ -4340,7 +4340,7 @@ to_uniform_bool_instr(opt_ctx& ctx, aco_ptr<Instruction>& instr)
|
|||
pred_instr->definitions[1].physReg() == scc);
|
||||
op.setTemp(pred_instr->definitions[1].getTemp());
|
||||
} else {
|
||||
unreachable("Invalid operand on uniform bitwise instruction.");
|
||||
UNREACHABLE("Invalid operand on uniform bitwise instruction.");
|
||||
}
|
||||
|
||||
ctx.uses[op.tempId()]++;
|
||||
|
|
@ -4792,7 +4792,7 @@ sopc_is_signed(aco_opcode opcode)
|
|||
SOPC(ge)
|
||||
SOPC(lt)
|
||||
SOPC(le)
|
||||
default: unreachable("Not a valid SOPC instruction.");
|
||||
default: UNREACHABLE("Not a valid SOPC instruction.");
|
||||
}
|
||||
#undef SOPC
|
||||
}
|
||||
|
|
|
|||
|
|
@ -478,7 +478,7 @@ try_optimize_scc_nocompare(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
|
|||
cselect->operands[0] = Operand::c32(0);
|
||||
cselect->operands[1] = instr->operands[!scc_op_idx];
|
||||
} else {
|
||||
unreachable("invalid op");
|
||||
UNREACHABLE("invalid op");
|
||||
}
|
||||
scc_op_idx = 2;
|
||||
instr.reset(cselect);
|
||||
|
|
|
|||
|
|
@ -876,7 +876,7 @@ print_stage(Stage stage, FILE* output)
|
|||
case SWStage::TS: fprintf(output, "TS"); break;
|
||||
case SWStage::MS: fprintf(output, "MS"); break;
|
||||
case SWStage::RT: fprintf(output, "RT"); break;
|
||||
default: unreachable("invalid SW stage");
|
||||
default: UNREACHABLE("invalid SW stage");
|
||||
}
|
||||
if (stage.num_sw_stages() > 1)
|
||||
fprintf(output, "+");
|
||||
|
|
@ -893,7 +893,7 @@ print_stage(Stage stage, FILE* output)
|
|||
case AC_HW_NEXT_GEN_GEOMETRY_SHADER: fprintf(output, "NEXT_GEN_GEOMETRY_SHADER"); break;
|
||||
case AC_HW_PIXEL_SHADER: fprintf(output, "PIXEL_SHADER"); break;
|
||||
case AC_HW_COMPUTE_SHADER: fprintf(output, "COMPUTE_SHADER"); break;
|
||||
default: unreachable("invalid HW stage");
|
||||
default: UNREACHABLE("invalid HW stage");
|
||||
}
|
||||
|
||||
fprintf(output, ")\n");
|
||||
|
|
|
|||
|
|
@ -711,7 +711,7 @@ add_subdword_operand(ra_ctx& ctx, aco_ptr<Instruction>& instr, unsigned idx, uns
|
|||
else if (instr->opcode == aco_opcode::global_store_short)
|
||||
instr->opcode = aco_opcode::global_store_short_d16_hi;
|
||||
else
|
||||
unreachable("Something went wrong: Impossible register assignment.");
|
||||
UNREACHABLE("Something went wrong: Impossible register assignment.");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -883,7 +883,7 @@ add_subdword_definition(Program* program, aco_ptr<Instruction>& instr, PhysReg r
|
|||
else if (instr->opcode == aco_opcode::ds_read_u16_d16)
|
||||
instr->opcode = aco_opcode::ds_read_u16_d16_hi;
|
||||
else
|
||||
unreachable("Something went wrong: Impossible register assignment.");
|
||||
UNREACHABLE("Something went wrong: Impossible register assignment.");
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -3291,7 +3291,7 @@ optimize_encoding_sopk(ra_ctx& ctx, RegisterFile& register_file, aco_ptr<Instruc
|
|||
case aco_opcode::s_add_i32: instr->opcode = aco_opcode::s_addk_i32; break;
|
||||
case aco_opcode::s_mul_i32: instr->opcode = aco_opcode::s_mulk_i32; break;
|
||||
case aco_opcode::s_cselect_b32: instr->opcode = aco_opcode::s_cmovk_i32; break;
|
||||
default: unreachable("illegal instruction");
|
||||
default: UNREACHABLE("illegal instruction");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1529,7 +1529,7 @@ assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
|
|||
if (!ctx.is_reloaded[spill_id]) {
|
||||
/* never reloaded, so don't spill */
|
||||
} else if (!is_assigned[spill_id]) {
|
||||
unreachable("No spill slot assigned for spill id");
|
||||
UNREACHABLE("No spill slot assigned for spill id");
|
||||
} else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
|
||||
spill_vgpr(ctx, block, instructions, *it, slots);
|
||||
} else {
|
||||
|
|
@ -1576,7 +1576,7 @@ assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
|
|||
assert(ctx.is_reloaded[spill_id]);
|
||||
|
||||
if (!is_assigned[spill_id]) {
|
||||
unreachable("No spill slot assigned for spill id");
|
||||
UNREACHABLE("No spill slot assigned for spill id");
|
||||
} else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
|
||||
reload_vgpr(ctx, block, instructions, *it, slots);
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -143,7 +143,7 @@ sanitize_cf_list(nir_function_impl* impl, struct exec_list* cf_list)
|
|||
}
|
||||
break;
|
||||
}
|
||||
case nir_cf_node_function: unreachable("Invalid cf type");
|
||||
case nir_cf_node_function: UNREACHABLE("Invalid cf type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -726,7 +726,7 @@ setup_isel_context(Program* program, unsigned shader_count, struct nir_shader* c
|
|||
case MESA_SHADER_CALLABLE:
|
||||
case MESA_SHADER_INTERSECTION:
|
||||
case MESA_SHADER_ANY_HIT: sw_stage = SWStage::RT; break;
|
||||
default: unreachable("Shader stage not implemented");
|
||||
default: UNREACHABLE("Shader stage not implemented");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -442,7 +442,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr)
|
|||
case 2: op = aco_opcode::buffer_load_format_d16_xy; break;
|
||||
case 3: op = aco_opcode::buffer_load_format_d16_xyz; break;
|
||||
case 4: op = aco_opcode::buffer_load_format_d16_xyzw; break;
|
||||
default: unreachable("Tex instruction loads more than 4 components.");
|
||||
default: UNREACHABLE("Tex instruction loads more than 4 components.");
|
||||
}
|
||||
} else {
|
||||
switch (util_last_bit(dmask & 0xf)) {
|
||||
|
|
@ -450,7 +450,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr)
|
|||
case 2: op = aco_opcode::buffer_load_format_xy; break;
|
||||
case 3: op = aco_opcode::buffer_load_format_xyz; break;
|
||||
case 4: op = aco_opcode::buffer_load_format_xyzw; break;
|
||||
default: unreachable("Tex instruction loads more than 4 components.");
|
||||
default: UNREACHABLE("Tex instruction loads more than 4 components.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -943,7 +943,7 @@ visit_cf_list(isel_context* ctx, struct exec_list* list)
|
|||
case nir_cf_node_block: visit_block(ctx, nir_cf_node_as_block(node)); break;
|
||||
case nir_cf_node_if: visit_if(ctx, nir_cf_node_as_if(node)); break;
|
||||
case nir_cf_node_loop: visit_loop(ctx, nir_cf_node_as_loop(node)); break;
|
||||
default: unreachable("unimplemented cf list type");
|
||||
default: UNREACHABLE("unimplemented cf list type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2937,7 +2937,7 @@ visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
|
|||
Temp one = bld.copy(bld.def(v1), Operand::c32(0x3c00u));
|
||||
bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), one, src);
|
||||
} else {
|
||||
unreachable("Wrong destination register class for nir_op_b2f16.");
|
||||
UNREACHABLE("Wrong destination register class for nir_op_b2f16.");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
@ -2952,7 +2952,7 @@ visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
|
|||
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(),
|
||||
Operand::c32(0x3f800000u), src);
|
||||
} else {
|
||||
unreachable("Wrong destination register class for nir_op_b2f32.");
|
||||
UNREACHABLE("Wrong destination register class for nir_op_b2f32.");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
@ -2970,7 +2970,7 @@ visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
|
|||
bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(), one, src);
|
||||
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(), upper);
|
||||
} else {
|
||||
unreachable("Wrong destination register class for nir_op_b2f64.");
|
||||
UNREACHABLE("Wrong destination register class for nir_op_b2f64.");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
@ -3024,7 +3024,7 @@ visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
|
|||
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), Operand::c32(1u),
|
||||
src);
|
||||
} else {
|
||||
unreachable("Invalid register class for b2i32");
|
||||
UNREACHABLE("Invalid register class for b2i32");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
@ -3471,7 +3471,7 @@ visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
|
|||
case nir_op_ubfe:
|
||||
case nir_op_ibfe: {
|
||||
if (dst.bytes() != 4)
|
||||
unreachable("Unsupported BFE bit size");
|
||||
UNREACHABLE("Unsupported BFE bit size");
|
||||
|
||||
if (dst.type() == RegType::sgpr) {
|
||||
Temp base = get_alu_src(ctx, instr->src[0]);
|
||||
|
|
|
|||
|
|
@ -743,7 +743,7 @@ mubuf_load_format_callback(Builder& bld, const LoadEmitInfo& info, unsigned byte
|
|||
case 4: op = aco_opcode::buffer_load_format_d16_xy; break;
|
||||
case 6: op = aco_opcode::buffer_load_format_d16_xyz; break;
|
||||
case 8: op = aco_opcode::buffer_load_format_d16_xyzw; break;
|
||||
default: unreachable("invalid buffer load format size"); break;
|
||||
default: UNREACHABLE("invalid buffer load format size"); break;
|
||||
}
|
||||
} else {
|
||||
assert(info.component_size == 4);
|
||||
|
|
@ -752,7 +752,7 @@ mubuf_load_format_callback(Builder& bld, const LoadEmitInfo& info, unsigned byte
|
|||
case 8: op = aco_opcode::buffer_load_format_xy; break;
|
||||
case 12: op = aco_opcode::buffer_load_format_xyz; break;
|
||||
case 16: op = aco_opcode::buffer_load_format_xyzw; break;
|
||||
default: unreachable("invalid buffer load format size"); break;
|
||||
default: UNREACHABLE("invalid buffer load format size"); break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1289,7 +1289,7 @@ get_buffer_store_op(unsigned bytes)
|
|||
case 12: return aco_opcode::buffer_store_dwordx3;
|
||||
case 16: return aco_opcode::buffer_store_dwordx4;
|
||||
}
|
||||
unreachable("Unexpected store size");
|
||||
UNREACHABLE("Unexpected store size");
|
||||
return aco_opcode::num_opcodes;
|
||||
}
|
||||
|
||||
|
|
@ -1366,7 +1366,7 @@ resolve_excess_vmem_const_offset(Builder& bld, Temp& voffset, unsigned const_off
|
|||
else if (likely(voffset.regClass() == v1))
|
||||
voffset = bld.vadd32(bld.def(v1), Operand(voffset), Operand::c32(excess_const_offset));
|
||||
else
|
||||
unreachable("Unsupported register class of voffset");
|
||||
UNREACHABLE("Unsupported register class of voffset");
|
||||
}
|
||||
|
||||
return const_offset;
|
||||
|
|
@ -1476,7 +1476,7 @@ visit_store_output(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
abort();
|
||||
}
|
||||
} else {
|
||||
unreachable("Shader stage not implemented");
|
||||
UNREACHABLE("Shader stage not implemented");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1660,7 +1660,7 @@ visit_load_tcs_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
if (load_input_from_temps(ctx, instr, dst))
|
||||
return;
|
||||
|
||||
unreachable("LDS-based TCS input should have been lowered in NIR.");
|
||||
UNREACHABLE("LDS-based TCS input should have been lowered in NIR.");
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -1668,7 +1668,7 @@ visit_load_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
{
|
||||
switch (ctx->shader->info.stage) {
|
||||
case MESA_SHADER_TESS_CTRL: visit_load_tcs_per_vertex_input(ctx, instr); break;
|
||||
default: unreachable("Unimplemented shader stage");
|
||||
default: UNREACHABLE("Unimplemented shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2052,7 +2052,7 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 2: opcode = aco_opcode::buffer_load_format_xy; break;
|
||||
case 3: opcode = aco_opcode::buffer_load_format_xyz; break;
|
||||
case 4: opcode = aco_opcode::buffer_load_format_xyzw; break;
|
||||
default: unreachable(">4 channel buffer image load");
|
||||
default: UNREACHABLE(">4 channel buffer image load");
|
||||
}
|
||||
} else {
|
||||
switch (util_bitcount(dmask)) {
|
||||
|
|
@ -2060,7 +2060,7 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 2: opcode = aco_opcode::buffer_load_format_d16_xy; break;
|
||||
case 3: opcode = aco_opcode::buffer_load_format_d16_xyz; break;
|
||||
case 4: opcode = aco_opcode::buffer_load_format_d16_xyzw; break;
|
||||
default: unreachable(">4 channel buffer image load");
|
||||
default: UNREACHABLE(">4 channel buffer image load");
|
||||
}
|
||||
}
|
||||
aco_ptr<Instruction> load{create_instruction(opcode, Format::MUBUF, 3 + is_sparse, 1)};
|
||||
|
|
@ -2195,7 +2195,7 @@ visit_image_store(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 0x3: opcode = aco_opcode::buffer_store_format_xy; break;
|
||||
case 0x7: opcode = aco_opcode::buffer_store_format_xyz; break;
|
||||
case 0xf: opcode = aco_opcode::buffer_store_format_xyzw; break;
|
||||
default: unreachable(">4 channel buffer image store");
|
||||
default: UNREACHABLE(">4 channel buffer image store");
|
||||
}
|
||||
} else {
|
||||
switch (dmask) {
|
||||
|
|
@ -2203,7 +2203,7 @@ visit_image_store(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 0x3: opcode = aco_opcode::buffer_store_format_d16_xy; break;
|
||||
case 0x7: opcode = aco_opcode::buffer_store_format_d16_xyz; break;
|
||||
case 0xf: opcode = aco_opcode::buffer_store_format_d16_xyzw; break;
|
||||
default: unreachable(">4 channel buffer image store");
|
||||
default: UNREACHABLE(">4 channel buffer image store");
|
||||
}
|
||||
}
|
||||
aco_ptr<Instruction> store{create_instruction(opcode, Format::MUBUF, 4, 0)};
|
||||
|
|
@ -2323,7 +2323,7 @@ translate_buffer_image_atomic_op(const nir_atomic_op op, aco_opcode* buf_op, aco
|
|||
*buf_op64 = aco_opcode::buffer_atomic_fmax_x2;
|
||||
*image_op = aco_opcode::image_atomic_fmax;
|
||||
break;
|
||||
default: unreachable("unsupported atomic operation");
|
||||
default: UNREACHABLE("unsupported atomic operation");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2611,7 +2611,7 @@ visit_store_global(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 16:
|
||||
op = global ? aco_opcode::global_store_dwordx4 : aco_opcode::flat_store_dwordx4;
|
||||
break;
|
||||
default: unreachable("store_global not implemented for this size.");
|
||||
default: UNREACHABLE("store_global not implemented for this size.");
|
||||
}
|
||||
|
||||
aco_ptr<Instruction> flat{create_instruction(op, format, 3, 0)};
|
||||
|
|
@ -2745,7 +2745,7 @@ visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
op32 = aco_opcode::num_opcodes;
|
||||
op64 = aco_opcode::global_atomic_ordered_add_b64;
|
||||
break;
|
||||
default: unreachable("unsupported atomic operation");
|
||||
default: UNREACHABLE("unsupported atomic operation");
|
||||
}
|
||||
|
||||
aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
|
||||
|
|
@ -3020,7 +3020,7 @@ translate_nir_scope(mesa_scope scope)
|
|||
case SCOPE_DEVICE: return scope_device;
|
||||
case SCOPE_SHADER_CALL: return scope_invocation;
|
||||
}
|
||||
unreachable("invalid scope");
|
||||
UNREACHABLE("invalid scope");
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -3199,7 +3199,7 @@ visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
op64 = aco_opcode::ds_max_f64;
|
||||
op64_rtn = aco_opcode::ds_max_rtn_f64;
|
||||
break;
|
||||
default: unreachable("Unhandled shared atomic intrinsic");
|
||||
default: UNREACHABLE("Unhandled shared atomic intrinsic");
|
||||
}
|
||||
|
||||
bool return_previous = !nir_def_is_unused(&instr->def);
|
||||
|
|
@ -3251,7 +3251,7 @@ visit_shared_append(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
switch (instr->intrinsic) {
|
||||
case nir_intrinsic_shared_append_amd: op = aco_opcode::ds_append; break;
|
||||
case nir_intrinsic_shared_consume_amd: op = aco_opcode::ds_consume; break;
|
||||
default: unreachable("not shared_append/consume");
|
||||
default: UNREACHABLE("not shared_append/consume");
|
||||
}
|
||||
|
||||
Temp tmp = bld.tmp(v1);
|
||||
|
|
@ -3423,7 +3423,7 @@ visit_store_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case 8: op = aco_opcode::scratch_store_dwordx2; break;
|
||||
case 12: op = aco_opcode::scratch_store_dwordx3; break;
|
||||
case 16: op = aco_opcode::scratch_store_dwordx4; break;
|
||||
default: unreachable("Unexpected store size");
|
||||
default: UNREACHABLE("Unexpected store size");
|
||||
}
|
||||
|
||||
uint32_t const_offset = base_const_offset + offsets[i];
|
||||
|
|
@ -3483,7 +3483,7 @@ get_reduce_op(nir_op op, unsigned bit_size)
|
|||
CASEF(fmul)
|
||||
CASEF(fmin)
|
||||
CASEF(fmax)
|
||||
default: unreachable("unknown reduction op");
|
||||
default: UNREACHABLE("unknown reduction op");
|
||||
#undef CASEI
|
||||
#undef CASEF
|
||||
}
|
||||
|
|
@ -3741,7 +3741,7 @@ inclusive_scan_to_exclusive(isel_context* ctx, ReduceOp op, Definition dst, Temp
|
|||
case ixor8:
|
||||
case ixor16:
|
||||
case ixor32: return bld.vop2(aco_opcode::v_xor_b32, dst, scan, src);
|
||||
default: unreachable("Unsupported op");
|
||||
default: UNREACHABLE("Unsupported op");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3862,22 +3862,22 @@ visit_cmat_muladd(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
switch (type_b) {
|
||||
case GLSL_TYPE_FLOAT_E4M3FN: opcode = aco_opcode::v_wmma_f32_16x16x16_fp8_fp8; break;
|
||||
case GLSL_TYPE_FLOAT_E5M2: opcode = aco_opcode::v_wmma_f32_16x16x16_fp8_bf8; break;
|
||||
default: unreachable("invalid cmat_muladd_amd type");
|
||||
default: UNREACHABLE("invalid cmat_muladd_amd type");
|
||||
}
|
||||
break;
|
||||
case GLSL_TYPE_FLOAT_E5M2:
|
||||
switch (type_b) {
|
||||
case GLSL_TYPE_FLOAT_E4M3FN: opcode = aco_opcode::v_wmma_f32_16x16x16_bf8_fp8; break;
|
||||
case GLSL_TYPE_FLOAT_E5M2: opcode = aco_opcode::v_wmma_f32_16x16x16_bf8_bf8; break;
|
||||
default: unreachable("invalid cmat_muladd_amd type");
|
||||
default: UNREACHABLE("invalid cmat_muladd_amd type");
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: unreachable("invalid cmat_muladd_amd type");
|
||||
default: UNREACHABLE("invalid cmat_muladd_amd type");
|
||||
}
|
||||
|
||||
if (opcode == aco_opcode::num_opcodes)
|
||||
unreachable("visit_cmat_muladd: invalid bit size combination");
|
||||
UNREACHABLE("visit_cmat_muladd: invalid bit size combination");
|
||||
|
||||
Builder bld(ctx->program, ctx->block);
|
||||
|
||||
|
|
@ -4041,7 +4041,7 @@ ds_bvh_stack_offset1_gfx11(unsigned stack_size)
|
|||
case 16: return 0x10;
|
||||
case 32: return 0x20;
|
||||
case 64: return 0x30;
|
||||
default: unreachable("invalid stack size");
|
||||
default: UNREACHABLE("invalid stack size");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4563,7 +4563,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
case nir_intrinsic_reduce: aco_op = aco_opcode::p_reduce; break;
|
||||
case nir_intrinsic_inclusive_scan: aco_op = aco_opcode::p_inclusive_scan; break;
|
||||
case nir_intrinsic_exclusive_scan: aco_op = aco_opcode::p_exclusive_scan; break;
|
||||
default: unreachable("unknown reduce intrinsic");
|
||||
default: UNREACHABLE("unknown reduce intrinsic");
|
||||
}
|
||||
|
||||
/* Avoid whole wave shift. */
|
||||
|
|
@ -5156,7 +5156,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
|
|||
switch (instr->num_components) {
|
||||
case 4: emit_ds_bvh_stack_push4_pop1_rtn(ctx, instr, bld); break;
|
||||
case 8: emit_ds_bvh_stack_push8_pop1_rtn(ctx, instr, bld); break;
|
||||
default: unreachable("Invalid BVH stack component count!");
|
||||
default: UNREACHABLE("Invalid BVH stack component count!");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ emit_clamp_alpha_test(isel_context* ctx, const struct aco_ps_epilog_info* info,
|
|||
case COMPARE_FUNC_GREATER: opcode = aco_opcode::v_cmp_nlt_f32; break;
|
||||
case COMPARE_FUNC_NOTEQUAL: opcode = aco_opcode::v_cmp_nlg_f32; break;
|
||||
case COMPARE_FUNC_GEQUAL: opcode = aco_opcode::v_cmp_nle_f32; break;
|
||||
default: unreachable("invalid alpha func");
|
||||
default: UNREACHABLE("invalid alpha func");
|
||||
}
|
||||
|
||||
Temp ref = get_arg(ctx, info->alpha_reference);
|
||||
|
|
|
|||
|
|
@ -163,7 +163,7 @@ int ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
|
|||
if (type == ctx->f64)
|
||||
return 64;
|
||||
|
||||
unreachable("Unhandled type kind in get_elem_bits");
|
||||
UNREACHABLE("Unhandled type kind in get_elem_bits");
|
||||
}
|
||||
|
||||
unsigned ac_get_type_size(LLVMTypeRef type)
|
||||
|
|
@ -207,7 +207,7 @@ static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeR
|
|||
else if (t == ctx->f64 || t == ctx->i64)
|
||||
return ctx->i64;
|
||||
else
|
||||
unreachable("Unhandled integer size");
|
||||
UNREACHABLE("Unhandled integer size");
|
||||
}
|
||||
|
||||
LLVMTypeRef ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
|
||||
|
|
@ -225,7 +225,7 @@ LLVMTypeRef ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
|
|||
case AC_ADDR_SPACE_LDS:
|
||||
return ctx->i32;
|
||||
default:
|
||||
unreachable("unhandled address space");
|
||||
UNREACHABLE("unhandled address space");
|
||||
}
|
||||
}
|
||||
return to_integer_type_scalar(ctx, t);
|
||||
|
|
@ -259,7 +259,7 @@ static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef
|
|||
else if (t == ctx->i64 || t == ctx->f64)
|
||||
return ctx->f64;
|
||||
else
|
||||
unreachable("Unhandled float size");
|
||||
UNREACHABLE("Unhandled float size");
|
||||
}
|
||||
|
||||
LLVMTypeRef ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
|
||||
|
|
@ -503,7 +503,7 @@ LLVMValueRef ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMVal
|
|||
if (value_count == 1) {
|
||||
return values[component];
|
||||
} else if (!value_count)
|
||||
unreachable("value_count is 0");
|
||||
UNREACHABLE("value_count is 0");
|
||||
|
||||
for (unsigned i = component; i < value_count + component; i++) {
|
||||
LLVMValueRef value = values[i];
|
||||
|
|
@ -527,7 +527,7 @@ LLVMValueRef ac_build_gather_values_extended(struct ac_llvm_context *ctx, LLVMVa
|
|||
if (value_count == 1 && !always_vector) {
|
||||
return values[0];
|
||||
} else if (!value_count)
|
||||
unreachable("value_count is 0");
|
||||
UNREACHABLE("value_count is 0");
|
||||
|
||||
for (i = 0; i < value_count; i++) {
|
||||
LLVMValueRef value = values[i * value_stride];
|
||||
|
|
@ -1384,7 +1384,7 @@ LLVMValueRef ac_build_umsb(struct ac_llvm_context *ctx, LLVMValueRef arg, LLVMTy
|
|||
zero = ctx->i8_0;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid bitsize");
|
||||
UNREACHABLE("invalid bitsize");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -1531,7 +1531,7 @@ static unsigned ac_num_coords(enum ac_image_dim dim)
|
|||
case ac_image_2darraymsaa:
|
||||
return 4;
|
||||
default:
|
||||
unreachable("ac_num_coords: bad dim");
|
||||
UNREACHABLE("ac_num_coords: bad dim");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1550,7 +1550,7 @@ static unsigned ac_num_derivs(enum ac_image_dim dim)
|
|||
case ac_image_2dmsaa:
|
||||
case ac_image_2darraymsaa:
|
||||
default:
|
||||
unreachable("derivatives not supported");
|
||||
UNREACHABLE("derivatives not supported");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1586,7 +1586,7 @@ static const char *get_atomic_name(enum ac_atomic_op op)
|
|||
case ac_atomic_fmax:
|
||||
return "fmax";
|
||||
}
|
||||
unreachable("bad atomic op");
|
||||
UNREACHABLE("bad atomic op");
|
||||
}
|
||||
|
||||
LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx, struct ac_image_args *a)
|
||||
|
|
@ -1764,7 +1764,7 @@ LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx, struct ac_image_
|
|||
name = "getresinfo";
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid image opcode");
|
||||
UNREACHABLE("invalid image opcode");
|
||||
}
|
||||
|
||||
const char *dimname;
|
||||
|
|
@ -1794,7 +1794,7 @@ LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx, struct ac_image_
|
|||
dimname = "2darraymsaa";
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid dim");
|
||||
UNREACHABLE("invalid dim");
|
||||
}
|
||||
|
||||
ac_build_type_name_for_intr(data_type, data_type_str, sizeof(data_type_str));
|
||||
|
|
@ -2186,7 +2186,7 @@ LLVMValueRef ac_build_bit_count(struct ac_llvm_context *ctx, LLVMValueRef src0)
|
|||
result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid bitsize");
|
||||
UNREACHABLE("invalid bitsize");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -2214,7 +2214,7 @@ LLVMValueRef ac_build_bitfield_reverse(struct ac_llvm_context *ctx, LLVMValueRef
|
|||
result = ac_build_intrinsic(ctx, "llvm.bitreverse.i8", ctx->i8, (LLVMValueRef[]){src0}, 1, 0);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid bitsize");
|
||||
UNREACHABLE("invalid bitsize");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -2272,7 +2272,7 @@ LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx, LLVMTypeRef dst_type, LLVM
|
|||
zero = ctx->i8_0;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid bitsize");
|
||||
UNREACHABLE("invalid bitsize");
|
||||
}
|
||||
|
||||
LLVMValueRef params[2] = {
|
||||
|
|
@ -2900,7 +2900,7 @@ static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op o
|
|||
case nir_op_iand:
|
||||
return ctx->i1true;
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
} else if (type_size == 1) {
|
||||
switch (op) {
|
||||
|
|
@ -2923,7 +2923,7 @@ static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op o
|
|||
case nir_op_ixor:
|
||||
return ctx->i8_0;
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
} else if (type_size == 2) {
|
||||
switch (op) {
|
||||
|
|
@ -2954,7 +2954,7 @@ static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op o
|
|||
case nir_op_ixor:
|
||||
return ctx->i16_0;
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
} else if (type_size == 4) {
|
||||
switch (op) {
|
||||
|
|
@ -2985,7 +2985,7 @@ static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op o
|
|||
case nir_op_ixor:
|
||||
return ctx->i32_0;
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
} else { /* type_size == 64bit */
|
||||
switch (op) {
|
||||
|
|
@ -3016,7 +3016,7 @@ static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op o
|
|||
case nir_op_ixor:
|
||||
return ctx->i64_0;
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3062,7 +3062,7 @@ static LLVMValueRef ac_build_alu_op(struct ac_llvm_context *ctx, LLVMValueRef lh
|
|||
case nir_op_ixor:
|
||||
return LLVMBuildXor(ctx->builder, lhs, rhs, "");
|
||||
default:
|
||||
unreachable("bad reduction intrinsic");
|
||||
UNREACHABLE("bad reduction intrinsic");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -415,7 +415,7 @@ LLVMValueRef ac_build_atomic_rmw(struct ac_llvm_context *ctx, LLVMAtomicRMWBinOp
|
|||
binop = AtomicRMWInst::FAdd;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid LLVMAtomicRMWBinOp");
|
||||
UNREACHABLE("invalid LLVMAtomicRMWBinOp");
|
||||
break;
|
||||
}
|
||||
unsigned SSID = unwrap(ctx->context)->getOrInsertSyncScopeID(sync_scope);
|
||||
|
|
|
|||
|
|
@ -264,7 +264,7 @@ static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx, LLVMValueRef src0, uns
|
|||
case 64:
|
||||
return LLVMBuildSelect(ctx->builder, src0, ctx->f64_1, ctx->f64_0, "");
|
||||
default:
|
||||
unreachable("Unsupported bit size.");
|
||||
UNREACHABLE("Unsupported bit size.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -290,7 +290,7 @@ static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx, LLVMValueRef src0, uns
|
|||
case 64:
|
||||
return LLVMBuildSelect(ctx->builder, src0, ctx->i64_1, ctx->i64_0, "");
|
||||
default:
|
||||
unreachable("Unsupported bit size.");
|
||||
UNREACHABLE("Unsupported bit size.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1650,7 +1650,7 @@ static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *in
|
|||
data_type = ctx->ac.f32;
|
||||
break;
|
||||
default:
|
||||
unreachable("Malformed vector store.");
|
||||
UNREACHABLE("Malformed vector store.");
|
||||
}
|
||||
data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
|
||||
|
||||
|
|
@ -1759,7 +1759,7 @@ translate_atomic_op(nir_atomic_op op)
|
|||
case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
|
||||
case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
|
||||
case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
|
||||
default: unreachable("Unexpected atomic");
|
||||
default: UNREACHABLE("Unexpected atomic");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1914,7 +1914,7 @@ static LLVMValueRef get_memory_addr(struct ac_nir_context *ctx, nir_intrinsic_in
|
|||
return get_shared_mem_ptr(ctx, intr->src[num_src - 1], nir_intrinsic_base(intr));
|
||||
}
|
||||
default:
|
||||
unreachable("unexpected store intrinsic");
|
||||
UNREACHABLE("unexpected store intrinsic");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1986,7 +1986,7 @@ static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
|
|||
data_type = ctx->ac.f64;
|
||||
break;
|
||||
default:
|
||||
unreachable("Unsupported float bit size");
|
||||
UNREACHABLE("Unsupported float bit size");
|
||||
}
|
||||
|
||||
data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
|
||||
|
|
|
|||
|
|
@ -1356,7 +1356,7 @@ radv_get_rgp_shader_stage(struct radv_shader *shader)
|
|||
case MESA_SHADER_CALLABLE:
|
||||
return RGP_HW_STAGE_CS;
|
||||
default:
|
||||
unreachable("invalid mesa shader stage");
|
||||
UNREACHABLE("invalid mesa shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1463,7 +1463,7 @@ radv_add_rt_record(struct radv_device *device, struct rgp_code_object *code_obje
|
|||
snprintf(shader_data->rt_shader_name, sizeof(shader_data->rt_shader_name), "_amdgpu_cs_main");
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid rt stage");
|
||||
UNREACHABLE("invalid rt stage");
|
||||
}
|
||||
record->num_shaders_combined = 1;
|
||||
|
||||
|
|
|
|||
|
|
@ -239,7 +239,7 @@ radv_meta_get_view_type(const struct radv_image *image)
|
|||
case VK_IMAGE_TYPE_3D:
|
||||
return VK_IMAGE_VIEW_TYPE_3D;
|
||||
default:
|
||||
unreachable("bad VkImageViewType");
|
||||
UNREACHABLE("bad VkImageViewType");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -149,7 +149,7 @@ radv_meta_get_96bit_channel_format(VkFormat format)
|
|||
return VK_FORMAT_R32_SFLOAT;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid R32G32B32 format");
|
||||
UNREACHABLE("invalid R32G32B32 format");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ get_view_type(const struct radv_image *image)
|
|||
case VK_IMAGE_TYPE_3D:
|
||||
return VK_IMAGE_VIEW_TYPE_3D;
|
||||
default:
|
||||
unreachable("bad VkImageViewType");
|
||||
UNREACHABLE("bad VkImageViewType");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ translate_sampler_dim(VkImageType type)
|
|||
case VK_IMAGE_TYPE_3D:
|
||||
return GLSL_SAMPLER_DIM_3D;
|
||||
default:
|
||||
unreachable("Unhandled image type");
|
||||
UNREACHABLE("Unhandled image type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -99,7 +99,7 @@ get_pipeline(struct radv_device *device, const struct radv_image_view *src_iview
|
|||
fs = radv_meta_nir_build_blit_copy_fragment_shader_stencil(device, tex_dim);
|
||||
break;
|
||||
default:
|
||||
unreachable("Unhandled aspect");
|
||||
UNREACHABLE("Unhandled aspect");
|
||||
}
|
||||
|
||||
VkGraphicsPipelineCreateInfo pipeline_create_info = {
|
||||
|
|
@ -219,7 +219,7 @@ get_pipeline(struct radv_device *device, const struct radv_image_view *src_iview
|
|||
render.stencil_attachment_format = VK_FORMAT_S8_UINT;
|
||||
break;
|
||||
default:
|
||||
unreachable("Unhandled aspect");
|
||||
UNREACHABLE("Unhandled aspect");
|
||||
}
|
||||
|
||||
result = vk_meta_create_graphics_pipeline(&device->vk, &device->meta_state.device, &pipeline_create_info, &render,
|
||||
|
|
|
|||
|
|
@ -229,7 +229,7 @@ radv_meta_blit2d_normal_dst(struct radv_cmd_buffer *cmd_buffer, struct radv_meta
|
|||
|
||||
radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
|
||||
} else
|
||||
unreachable("Processing blit2d with multiple aspects.");
|
||||
UNREACHABLE("Processing blit2d with multiple aspects.");
|
||||
|
||||
float vertex_push_constants[4] = {
|
||||
rect->src_x,
|
||||
|
|
@ -368,7 +368,7 @@ get_color_pipeline(struct radv_device *device, enum blit2d_src_type src_type, Vk
|
|||
name = "meta_blit2d_buffer_fs";
|
||||
break;
|
||||
default:
|
||||
unreachable("unknown blit src type\n");
|
||||
UNREACHABLE("unknown blit src type\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -508,7 +508,7 @@ get_depth_only_pipeline(struct radv_device *device, enum blit2d_src_type src_typ
|
|||
name = "meta_blit2d_depth_buffer_fs";
|
||||
break;
|
||||
default:
|
||||
unreachable("unknown blit src type\n");
|
||||
UNREACHABLE("unknown blit src type\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -666,7 +666,7 @@ get_stencil_only_pipeline(struct radv_device *device, enum blit2d_src_type src_t
|
|||
name = "meta_blit2d_stencil_buffer_fs";
|
||||
break;
|
||||
default:
|
||||
unreachable("unknown blit src type\n");
|
||||
UNREACHABLE("unknown blit src type\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1025,7 +1025,7 @@ radv_clear_dcc_comp_to_single(struct radv_cmd_buffer *cmd_buffer, struct radv_im
|
|||
format = VK_FORMAT_R32G32B32A32_UINT;
|
||||
break;
|
||||
default:
|
||||
unreachable("Unsupported number of bytes per pixel");
|
||||
UNREACHABLE("Unsupported number of bytes per pixel");
|
||||
}
|
||||
|
||||
result = get_clear_dcc_comp_to_single_pipeline(device, is_msaa, &pipeline, &layout);
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ vk_format_for_size(int bs)
|
|||
case 16:
|
||||
return VK_FORMAT_R32G32B32A32_UINT;
|
||||
default:
|
||||
unreachable("Invalid format block size");
|
||||
UNREACHABLE("Invalid format block size");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -440,7 +440,7 @@ radv_get_compat_color_ds_format(VkFormat format)
|
|||
case VK_FORMAT_R32_UINT:
|
||||
return VK_FORMAT_R32_SFLOAT;
|
||||
default:
|
||||
unreachable("invalid color format for color to depth/stencil image copy.");
|
||||
UNREACHABLE("invalid color format for color to depth/stencil image copy.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -127,7 +127,7 @@ get_pipeline(struct radv_device *device, enum radv_color_op op, VkPipeline *pipe
|
|||
pdev->info.gfx_level >= GFX11 ? V_028808_CB_DCC_DECOMPRESS_GFX11 : V_028808_CB_DCC_DECOMPRESS_GFX8;
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid color op");
|
||||
UNREACHABLE("Invalid color op");
|
||||
}
|
||||
|
||||
const VkGraphicsPipelineCreateInfo pipeline_create_info = {
|
||||
|
|
@ -335,7 +335,7 @@ radv_process_color_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *
|
|||
pred_offset = image->dcc_pred_offset;
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid color op");
|
||||
UNREACHABLE("Invalid color op");
|
||||
}
|
||||
|
||||
if (radv_dcc_enabled(image, subresourceRange->baseMipLevel) &&
|
||||
|
|
|
|||
|
|
@ -656,7 +656,7 @@ radv_cmd_buffer_resolve_rendering(struct radv_cmd_buffer *cmd_buffer)
|
|||
radv_cmd_buffer_resolve_rendering_fs(cmd_buffer, src_iview, src_layout, dst_iview, dst_layout);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid resolve method");
|
||||
UNREACHABLE("Invalid resolve method");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1276,7 +1276,7 @@ radv_meta_resolve_compute_type_name(enum radv_meta_resolve_compute_type type)
|
|||
case RADV_META_RESOLVE_COMPUTE_FLOAT:
|
||||
return "float";
|
||||
default:
|
||||
unreachable("invalid compute resolve type");
|
||||
UNREACHABLE("invalid compute resolve type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1341,7 +1341,7 @@ get_resolve_mode_str(VkResolveModeFlagBits resolve_mode)
|
|||
case VK_RESOLVE_MODE_MAX_BIT:
|
||||
return "max";
|
||||
default:
|
||||
unreachable("invalid resolve mode");
|
||||
UNREACHABLE("invalid resolve mode");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1402,7 +1402,7 @@ radv_meta_nir_build_depth_stencil_resolve_compute_shader(struct radv_device *dev
|
|||
outval = nir_umax(&b, outval, si);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid resolve mode");
|
||||
UNREACHABLE("invalid resolve mode");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1500,7 +1500,7 @@ radv_meta_nir_build_depth_stencil_resolve_fragment_shader(struct radv_device *de
|
|||
outval = nir_umax(&b, outval, si);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid resolve mode");
|
||||
UNREACHABLE("invalid resolve mode");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -135,7 +135,7 @@ lower_abi_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
replacement = nir_iadd_imm_nuw(b, n, 1);
|
||||
}
|
||||
} else
|
||||
unreachable("invalid tessellation shader stage");
|
||||
UNREACHABLE("invalid tessellation shader stage");
|
||||
break;
|
||||
case nir_intrinsic_load_pipeline_stat_query_enabled_amd:
|
||||
replacement = shader_query_bool_setting(b, radv_shader_query_pipeline_stat, s);
|
||||
|
|
@ -321,7 +321,7 @@ lower_abi_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
/* TES won't use this intrinsic, because it can get primitive id directly
|
||||
* instead of using this intrinsic to pass primitive id by LDS.
|
||||
*/
|
||||
unreachable("load_provoking_vtx_in_prim_amd is only supported in VS and GS");
|
||||
UNREACHABLE("load_provoking_vtx_in_prim_amd is only supported in VS and GS");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -410,7 +410,7 @@ lower_abi_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
|
|||
num_vertices = 3;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid GS output primitive");
|
||||
UNREACHABLE("invalid GS output primitive");
|
||||
break;
|
||||
}
|
||||
replacement = nir_imm_int(b, num_vertices);
|
||||
|
|
|
|||
|
|
@ -676,7 +676,7 @@ radv_nir_lower_cooperative_matrix(nir_shader *shader, enum amd_gfx_level gfx_lev
|
|||
else if (src_use == GLSL_CMAT_USE_B)
|
||||
src_use = GLSL_CMAT_USE_A;
|
||||
else
|
||||
unreachable("unsupported transpose");
|
||||
UNREACHABLE("unsupported transpose");
|
||||
}
|
||||
} else {
|
||||
sat = nir_intrinsic_saturate(intr);
|
||||
|
|
|
|||
|
|
@ -100,7 +100,7 @@ radv_recompute_fs_input_bases_callback(UNUSED nir_builder *b, nir_intrinsic_inst
|
|||
new_base = s->num_always_per_vertex + s->num_potentially_per_primitive +
|
||||
util_bitcount64(s->always_per_primitive & location_mask);
|
||||
} else {
|
||||
unreachable("invalid FS input");
|
||||
UNREACHABLE("invalid FS input");
|
||||
}
|
||||
|
||||
if (new_base != old_base) {
|
||||
|
|
|
|||
|
|
@ -404,7 +404,7 @@ lower_rq_load(struct radv_device *device, nir_builder *b, nir_intrinsic_instr *i
|
|||
return radv_load_vertex_position(device, b, primitive_addr, nir_intrinsic_column(instr));
|
||||
}
|
||||
default:
|
||||
unreachable("Invalid nir_ray_query_value!");
|
||||
UNREACHABLE("Invalid nir_ray_query_value!");
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
|
@ -671,7 +671,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device
|
|||
lower_rq_terminate(&builder, intrinsic, rq);
|
||||
break;
|
||||
default:
|
||||
unreachable("Unsupported ray query intrinsic!");
|
||||
UNREACHABLE("Unsupported ray query intrinsic!");
|
||||
}
|
||||
|
||||
if (new_dest)
|
||||
|
|
|
|||
|
|
@ -238,7 +238,7 @@ constant_fold_scalar(nir_scalar s, unsigned invocation_id, nir_shader *shader, n
|
|||
return true;
|
||||
}
|
||||
|
||||
unreachable("unhandled scalar type");
|
||||
UNREACHABLE("unhandled scalar type");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1849,13 +1849,13 @@ lower_rt_instruction_monolithic(nir_builder *b, nir_instr *instr, void *data)
|
|||
return true;
|
||||
}
|
||||
case nir_intrinsic_rt_resume:
|
||||
unreachable("nir_intrinsic_rt_resume");
|
||||
UNREACHABLE("nir_intrinsic_rt_resume");
|
||||
case nir_intrinsic_rt_return_amd:
|
||||
unreachable("nir_intrinsic_rt_return_amd");
|
||||
UNREACHABLE("nir_intrinsic_rt_return_amd");
|
||||
case nir_intrinsic_execute_closest_hit_amd:
|
||||
unreachable("nir_intrinsic_execute_closest_hit_amd");
|
||||
UNREACHABLE("nir_intrinsic_execute_closest_hit_amd");
|
||||
case nir_intrinsic_execute_miss_amd:
|
||||
unreachable("nir_intrinsic_execute_miss_amd");
|
||||
UNREACHABLE("nir_intrinsic_execute_miss_amd");
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ radv_get_acceleration_structure_layout(struct radv_device *device,
|
|||
bvh_leaf_size = sizeof(struct radv_gfx12_instance_node) + sizeof(struct radv_gfx12_instance_node_user_data);
|
||||
break;
|
||||
default:
|
||||
unreachable("Unknown VkGeometryTypeKHR");
|
||||
UNREACHABLE("Unknown VkGeometryTypeKHR");
|
||||
}
|
||||
bvh_node_size_gcd = RADV_GFX12_BVH_NODE_SIZE;
|
||||
} else {
|
||||
|
|
@ -111,7 +111,7 @@ radv_get_acceleration_structure_layout(struct radv_device *device,
|
|||
bvh_leaf_size = sizeof(struct radv_bvh_instance_node);
|
||||
break;
|
||||
default:
|
||||
unreachable("Unknown VkGeometryTypeKHR");
|
||||
UNREACHABLE("Unknown VkGeometryTypeKHR");
|
||||
}
|
||||
bvh_node_size_gcd = 64;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1258,7 +1258,7 @@ radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
|
|||
radeon_emit(sample_locs_pixel[3][1]);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid number of samples");
|
||||
UNREACHABLE("invalid number of samples");
|
||||
}
|
||||
|
||||
if (pdev->info.gfx_level >= GFX12) {
|
||||
|
|
@ -3012,7 +3012,7 @@ radv_emit_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
|
|||
radv_emit_compute_shader(pdev, cmd_buffer->gang.cs, cmd_buffer->state.shaders[MESA_SHADER_TASK]);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid bind stage");
|
||||
UNREACHABLE("invalid bind stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3155,7 +3155,7 @@ radv_get_depth_clamp_mode(struct radv_cmd_buffer *cmd_buffer)
|
|||
mode = RADV_DEPTH_CLAMP_MODE_USER_DEFINED;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid depth clamp mode\n");
|
||||
UNREACHABLE("invalid depth clamp mode\n");
|
||||
}
|
||||
|
||||
if (!d->vk.rs.depth_clamp_enable) {
|
||||
|
|
@ -3513,7 +3513,7 @@ radv_get_primitive_reset_index(const struct radv_cmd_buffer *cmd_buffer)
|
|||
case V_028A7C_VGT_INDEX_32:
|
||||
return 0xffffffffu;
|
||||
default:
|
||||
unreachable("invalid index type");
|
||||
UNREACHABLE("invalid index type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -5213,7 +5213,7 @@ radv_emit_tess_domain_origin(struct radv_cmd_buffer *cmd_buffer)
|
|||
type = V_028B6C_TESS_ISOLINE;
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid tess primitive type");
|
||||
UNREACHABLE("Invalid tess primitive type");
|
||||
}
|
||||
|
||||
switch (tes->info.tes.spacing) {
|
||||
|
|
@ -5227,7 +5227,7 @@ radv_emit_tess_domain_origin(struct radv_cmd_buffer *cmd_buffer)
|
|||
partitioning = V_028B6C_PART_FRAC_EVEN;
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid tess spacing type");
|
||||
UNREACHABLE("Invalid tess spacing type");
|
||||
}
|
||||
|
||||
if (tes->info.tes.point_mode) {
|
||||
|
|
@ -5672,7 +5672,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, VkShaderStageFlags stag
|
|||
dirty_stages = VK_SHADER_STAGE_COMPUTE_BIT;
|
||||
break;
|
||||
default:
|
||||
unreachable("Unhandled bind point");
|
||||
UNREACHABLE("Unhandled bind point");
|
||||
}
|
||||
|
||||
if (internal_stages & VK_SHADER_STAGE_COMPUTE_BIT) {
|
||||
|
|
@ -7023,7 +7023,7 @@ vk_to_index_type(VkIndexType type)
|
|||
case VK_INDEX_TYPE_UINT32:
|
||||
return V_028A7C_VGT_INDEX_32;
|
||||
default:
|
||||
unreachable("invalid index type");
|
||||
UNREACHABLE("invalid index type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -7039,7 +7039,7 @@ radv_get_vgt_index_size(uint32_t type)
|
|||
case V_028A7C_VGT_INDEX_32:
|
||||
return 4;
|
||||
default:
|
||||
unreachable("invalid index type");
|
||||
UNREACHABLE("invalid index type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -7726,7 +7726,7 @@ radv_bind_shader(struct radv_cmd_buffer *cmd_buffer, struct radv_shader *shader,
|
|||
/* no-op */
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid shader stage");
|
||||
UNREACHABLE("invalid shader stage");
|
||||
}
|
||||
|
||||
cmd_buffer->state.shaders[stage] = shader;
|
||||
|
|
@ -14498,63 +14498,63 @@ radv_CmdBindShadersEXT(VkCommandBuffer commandBuffer, uint32_t stageCount, const
|
|||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageModulationModeNV(VkCommandBuffer commandBuffer, VkCoverageModulationModeNV coverageModulationMode)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageModulationTableEnableNV(VkCommandBuffer commandBuffer, VkBool32 coverageModulationTableEnable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageModulationTableNV(VkCommandBuffer commandBuffer, uint32_t coverageModulationTableCount,
|
||||
const float *pCoverageModulationTable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageReductionModeNV(VkCommandBuffer commandBuffer, VkCoverageReductionModeNV coverageReductionMode)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageToColorEnableNV(VkCommandBuffer commandBuffer, VkBool32 coverageToColorEnable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetCoverageToColorLocationNV(VkCommandBuffer commandBuffer, uint32_t coverageToColorLocation)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetRepresentativeFragmentTestEnableNV(VkCommandBuffer commandBuffer, VkBool32 representativeFragmentTestEnable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetShadingRateImageEnableNV(VkCommandBuffer commandBuffer, VkBool32 shadingRateImageEnable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetViewportSwizzleNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
|
||||
const VkViewportSwizzleNV *pViewportSwizzles)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
radv_CmdSetViewportWScalingEnableNV(VkCommandBuffer commandBuffer, VkBool32 viewportWScalingEnable)
|
||||
{
|
||||
unreachable("Not supported by RADV.");
|
||||
UNREACHABLE("Not supported by RADV.");
|
||||
}
|
||||
|
||||
VKAPI_ATTR void VKAPI_CALL
|
||||
|
|
|
|||
|
|
@ -358,7 +358,7 @@ radv_cp_wait_mem(struct radeon_cmdbuf *cs, const enum radv_queue_family qf, cons
|
|||
} else if (qf == RADV_QUEUE_TRANSFER) {
|
||||
radv_sdma_emit_wait_mem(cs, op, va, ref, mask);
|
||||
} else {
|
||||
unreachable("unsupported queue family");
|
||||
UNREACHABLE("unsupported queue family");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -379,7 +379,7 @@ radv_cs_write_data_head(const struct radv_device *device, struct radeon_cmdbuf *
|
|||
} else if (qf == RADV_QUEUE_TRANSFER) {
|
||||
radv_sdma_emit_write_data_head(cs, va, count);
|
||||
} else {
|
||||
unreachable("unsupported queue family");
|
||||
UNREACHABLE("unsupported queue family");
|
||||
}
|
||||
|
||||
return cdw_end;
|
||||
|
|
|
|||
|
|
@ -187,6 +187,6 @@ radv_GetDescriptorEXT(VkDevice _device, const VkDescriptorGetInfoEXT *pDescripto
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("invalid descriptor type");
|
||||
UNREACHABLE("invalid descriptor type");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -165,7 +165,7 @@ radv_write_image_descriptor(unsigned *dst, unsigned size, VkDescriptorType descr
|
|||
memcpy(dst, descriptor, 64);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid size");
|
||||
UNREACHABLE("Invalid size");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -3428,7 +3428,7 @@ radv_CreateIndirectExecutionSetEXT(VkDevice _device, const VkIndirectExecutionSe
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("Invalid IES type");
|
||||
UNREACHABLE("Invalid IES type");
|
||||
}
|
||||
|
||||
stride = sizeof(struct radv_compute_pipeline_metadata);
|
||||
|
|
@ -3469,7 +3469,7 @@ radv_CreateIndirectExecutionSetEXT(VkDevice _device, const VkIndirectExecutionSe
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("Invalid IES type");
|
||||
UNREACHABLE("Invalid IES type");
|
||||
}
|
||||
|
||||
*pIndirectExecutionSet = radv_indirect_execution_set_to_handle(set);
|
||||
|
|
|
|||
|
|
@ -912,7 +912,7 @@ radv_get_image_format_properties(struct radv_physical_device *pdev, const VkPhys
|
|||
} else if (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
|
||||
format_feature_flags = radv_get_modifier_flags(pdev, format, mod_info->drmFormatModifier, &format_props);
|
||||
} else {
|
||||
unreachable("bad VkImageTiling");
|
||||
UNREACHABLE("bad VkImageTiling");
|
||||
}
|
||||
|
||||
if (format_feature_flags == 0)
|
||||
|
|
@ -925,7 +925,7 @@ radv_get_image_format_properties(struct radv_physical_device *pdev, const VkPhys
|
|||
|
||||
switch (info->type) {
|
||||
default:
|
||||
unreachable("bad vkimage type\n");
|
||||
UNREACHABLE("bad vkimage type\n");
|
||||
case VK_IMAGE_TYPE_1D:
|
||||
maxExtent.width = 16384;
|
||||
maxExtent.height = 1;
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ radv_swizzle_conv(VkComponentSwizzle component, const unsigned char chan[4], VkC
|
|||
case VK_COMPONENT_SWIZZLE_A:
|
||||
return (enum pipe_swizzle)chan[vk_swiz - VK_COMPONENT_SWIZZLE_R];
|
||||
default:
|
||||
unreachable("Illegal swizzle");
|
||||
UNREACHABLE("Illegal swizzle");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -655,7 +655,7 @@ radv_get_surface_flags(struct radv_device *device, struct radv_image *image, uns
|
|||
flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled image type");
|
||||
UNREACHABLE("unhandled image type");
|
||||
}
|
||||
|
||||
/* Required for clearing/initializing a specific layer on GFX8. */
|
||||
|
|
@ -1389,7 +1389,7 @@ radv_select_modifier(const struct radv_device *dev, VkFormat format,
|
|||
}
|
||||
}
|
||||
}
|
||||
unreachable("App specified an invalid modifier");
|
||||
UNREACHABLE("App specified an invalid modifier");
|
||||
}
|
||||
|
||||
VkResult
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ radv_tex_dim(VkImageType image_type, VkImageViewType view_type, unsigned nr_laye
|
|||
else
|
||||
return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
|
||||
default:
|
||||
unreachable("illegal image type");
|
||||
UNREACHABLE("illegal image type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ get_llvm_calling_convention(LLVMValueRef func, gl_shader_stage stage)
|
|||
return AC_LLVM_AMDGPU_CS;
|
||||
break;
|
||||
default:
|
||||
unreachable("Unhandle shader type");
|
||||
UNREACHABLE("Unhandle shader type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -838,7 +838,7 @@ radv_pc_get_result(const struct radv_perfcounter_impl *impl, const uint64_t *dat
|
|||
result.float64 += radv_pc_sum_reg(impl->regs[2 * i], data) * radv_pc_sum_reg(impl->regs[2 * i + 1], data);
|
||||
break;
|
||||
default:
|
||||
unreachable("unhandled performance counter operation");
|
||||
UNREACHABLE("unhandled performance counter operation");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -511,7 +511,7 @@ radv_find_memory_index(const struct radv_physical_device *pdev, VkMemoryProperty
|
|||
return i;
|
||||
}
|
||||
}
|
||||
unreachable("invalid memory properties");
|
||||
UNREACHABLE("invalid memory properties");
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -1431,7 +1431,7 @@ radv_get_compiler_string(struct radv_physical_device *pdev)
|
|||
#if AMD_LLVM_AVAILABLE
|
||||
return " (LLVM " MESA_LLVM_VERSION_STRING ")";
|
||||
#else
|
||||
unreachable("LLVM is not available");
|
||||
UNREACHABLE("LLVM is not available");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline
|
|||
radv_destroy_ray_tracing_pipeline(device, radv_pipeline_to_ray_tracing(pipeline));
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid pipeline type");
|
||||
UNREACHABLE("invalid pipeline type");
|
||||
}
|
||||
|
||||
radv_rmv_log_resource_destroy(device, (uint64_t)radv_pipeline_to_handle(pipeline));
|
||||
|
|
@ -174,7 +174,7 @@ radv_pipeline_get_shader_key(const struct radv_device *device, const VkPipelineS
|
|||
else if (subgroup_size->requiredSubgroupSize == 64)
|
||||
key.subgroup_required_size = RADV_REQUIRED_WAVE64;
|
||||
else
|
||||
unreachable("Unsupported required subgroup size.");
|
||||
UNREACHABLE("Unsupported required subgroup size.");
|
||||
}
|
||||
|
||||
if (stage->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT) {
|
||||
|
|
@ -867,7 +867,7 @@ radv_GetPipelineExecutablePropertiesKHR(VkDevice _device, const VkPipelineInfoKH
|
|||
description = "Vulkan Callable Shader";
|
||||
break;
|
||||
default:
|
||||
unreachable("Unsupported shader stage");
|
||||
UNREACHABLE("Unsupported shader stage");
|
||||
}
|
||||
|
||||
props->subgroupSize = shader->info.wave_size;
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ radv_get_pipeline_key(struct radv_device *device, const VkPipelineCreateInfoKHR
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("unsupported pipeline create info struct");
|
||||
UNREACHABLE("unsupported pipeline create info struct");
|
||||
}
|
||||
|
||||
return result;
|
||||
|
|
|
|||
|
|
@ -380,7 +380,7 @@ radv_dynamic_state_mask(VkDynamicState state)
|
|||
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT:
|
||||
return RADV_DYNAMIC_SCISSOR | RADV_DYNAMIC_SCISSOR_WITH_COUNT;
|
||||
default:
|
||||
unreachable("Unhandled dynamic state");
|
||||
UNREACHABLE("Unhandled dynamic state");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -568,7 +568,7 @@ shader_stage_to_pipeline_library_flags(VkShaderStageFlagBits stage)
|
|||
case VK_SHADER_STAGE_FRAGMENT_BIT:
|
||||
return VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
|
||||
default:
|
||||
unreachable("Invalid shader stage");
|
||||
UNREACHABLE("Invalid shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1458,7 +1458,7 @@ radv_graphics_shaders_link(const struct radv_device *device, const struct radv_g
|
|||
radv_link_fs(&stages[s], gfx_state);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid graphics shader stage");
|
||||
UNREACHABLE("Invalid graphics shader stage");
|
||||
}
|
||||
|
||||
next_stage = &stages[s];
|
||||
|
|
@ -1485,7 +1485,7 @@ radv_graphics_shaders_fill_linked_vs_io_info(struct radv_shader_stage *vs_stage,
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("invalid next stage for VS");
|
||||
UNREACHABLE("invalid next stage for VS");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2186,7 +2186,7 @@ radv_get_next_stage(gl_shader_stage stage, VkShaderStageFlagBits active_nir_stag
|
|||
case MESA_SHADER_FRAGMENT:
|
||||
return MESA_SHADER_NONE;
|
||||
default:
|
||||
unreachable("invalid graphics shader stage");
|
||||
UNREACHABLE("invalid graphics shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -255,7 +255,7 @@ radv_translate_prim(unsigned topology)
|
|||
case VK_PRIMITIVE_TOPOLOGY_META_RECT_LIST_MESA:
|
||||
return V_008958_DI_PT_RECTLIST;
|
||||
default:
|
||||
unreachable("unhandled primitive type");
|
||||
UNREACHABLE("unhandled primitive type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -415,7 +415,7 @@ radv_translate_blend_logic_op(VkLogicOp op)
|
|||
case VK_LOGIC_OP_SET:
|
||||
return V_028808_ROP3_SET;
|
||||
default:
|
||||
unreachable("Unhandled logic op");
|
||||
UNREACHABLE("Unhandled logic op");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ radv_create_group_handles(struct radv_device *device, const VkRayTracingPipeline
|
|||
}
|
||||
break;
|
||||
case VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR:
|
||||
unreachable("VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR");
|
||||
UNREACHABLE("VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR");
|
||||
}
|
||||
|
||||
if (group_info->pShaderGroupCaptureReplayHandle) {
|
||||
|
|
@ -817,7 +817,7 @@ compute_rt_stack_size(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, stru
|
|||
any_hit_size = MAX2(any_hit_size, size);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid stage type in RT shader");
|
||||
UNREACHABLE("Invalid stage type in RT shader");
|
||||
}
|
||||
}
|
||||
pipeline->stack_size =
|
||||
|
|
|
|||
|
|
@ -329,7 +329,7 @@ radv_dump_printf_data(struct radv_device *device, FILE *out)
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("Unsupported data type");
|
||||
UNREACHABLE("Unsupported data type");
|
||||
}
|
||||
|
||||
if (lane != lane_count - 1)
|
||||
|
|
|
|||
|
|
@ -1742,7 +1742,7 @@ get_pipeline(struct radv_device *device, VkQueryType query_type, VkPipeline *pip
|
|||
key = RADV_META_OBJECT_KEY_QUERY_MESH_PRIMS_GEN;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid query type");
|
||||
UNREACHABLE("invalid query type");
|
||||
}
|
||||
|
||||
VkPipeline pipeline_from_cache = vk_meta_lookup_pipeline(&device->meta_state.device, &key, sizeof(key));
|
||||
|
|
@ -1771,7 +1771,7 @@ get_pipeline(struct radv_device *device, VkQueryType query_type, VkPipeline *pip
|
|||
cs = build_ms_prim_gen_query_shader(device);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid query type");
|
||||
UNREACHABLE("invalid query type");
|
||||
}
|
||||
|
||||
const VkPipelineShaderStageCreateInfo stage_info = {
|
||||
|
|
@ -1998,7 +1998,7 @@ radv_create_query_pool(struct radv_device *device, const VkQueryPoolCreateInfo *
|
|||
pool->stride = 48;
|
||||
break;
|
||||
default:
|
||||
unreachable("creating unhandled query type");
|
||||
UNREACHABLE("creating unhandled query type");
|
||||
}
|
||||
|
||||
pool->availability_offset = pool->stride * pCreateInfo->queryCount;
|
||||
|
|
@ -2425,7 +2425,7 @@ radv_GetQueryPoolResults(VkDevice _device, VkQueryPool queryPool, uint32_t first
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("trying to get results of unhandled query type");
|
||||
UNREACHABLE("trying to get results of unhandled query type");
|
||||
}
|
||||
|
||||
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
|
||||
|
|
@ -2519,7 +2519,7 @@ radv_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPoo
|
|||
radv_copy_ms_prim_query_result(cmd_buffer, pool, firstQuery, queryCount, dst_va, stride, flags);
|
||||
break;
|
||||
default:
|
||||
unreachable("trying to get results of unhandled query type");
|
||||
UNREACHABLE("trying to get results of unhandled query type");
|
||||
}
|
||||
|
||||
radv_resume_conditional_rendering(cmd_buffer);
|
||||
|
|
@ -2598,7 +2598,7 @@ emit_begin_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *poo
|
|||
cmd_buffer->video.feedback_query_va = va;
|
||||
break;
|
||||
default:
|
||||
unreachable("beginning unhandled query type");
|
||||
UNREACHABLE("beginning unhandled query type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2632,7 +2632,7 @@ emit_end_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool,
|
|||
cmd_buffer->video.feedback_query_va = 0;
|
||||
break;
|
||||
default:
|
||||
unreachable("ending unhandled query type");
|
||||
UNREACHABLE("ending unhandled query type");
|
||||
}
|
||||
|
||||
cmd_buffer->active_query_flush_bits |=
|
||||
|
|
@ -2817,7 +2817,7 @@ radv_CmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
|
|||
va += offsetof(struct radv_accel_struct_header, size);
|
||||
break;
|
||||
default:
|
||||
unreachable("Unhandle accel struct query type.");
|
||||
UNREACHABLE("Unhandle accel struct query type.");
|
||||
}
|
||||
|
||||
radeon_emit(PKT3(PKT3_COPY_DATA, 4, 0));
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfo *pObj
|
|||
case VK_QUEUE_GLOBAL_PRIORITY_LOW:
|
||||
return RADEON_CTX_PRIORITY_LOW;
|
||||
default:
|
||||
unreachable("Illegal global priority value");
|
||||
UNREACHABLE("Illegal global priority value");
|
||||
return RADEON_CTX_PRIORITY_INVALID;
|
||||
}
|
||||
}
|
||||
|
|
@ -2146,6 +2146,6 @@ radv_queue_family_to_ring(const struct radv_physical_device *pdev, enum radv_que
|
|||
case RADV_QUEUE_VIDEO_ENC:
|
||||
return AMD_IP_VCN_ENC;
|
||||
default:
|
||||
unreachable("Unknown queue family");
|
||||
UNREACHABLE("Unknown queue family");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -381,7 +381,7 @@ fill_memory_info(const struct radeon_info *gpu_info, struct vk_rmv_memory_info *
|
|||
out_info->size = MIN2((uint64_t)gpu_info->gart_size_kb * 1024ULL, ram_size);
|
||||
} break;
|
||||
default:
|
||||
unreachable("invalid memory index");
|
||||
UNREACHABLE("invalid memory index");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -410,7 +410,7 @@ memory_type_from_vram_type(uint32_t vram_type)
|
|||
case AMD_VRAM_TYPE_LPDDR5:
|
||||
return VK_RMV_MEMORY_TYPE_LPDDR5;
|
||||
default:
|
||||
unreachable("Invalid vram type");
|
||||
UNREACHABLE("Invalid vram type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -929,7 +929,7 @@ radv_rmv_log_submit(struct radv_device *device, enum amd_ip_type type)
|
|||
vk_rmv_log_misc_token(&device->vk, VK_RMV_MISC_EVENT_TYPE_SUBMIT_COPY);
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid ip type");
|
||||
UNREACHABLE("invalid ip type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -153,7 +153,7 @@ amdgpu_vram_type_to_rra(uint32_t type)
|
|||
case AMD_VRAM_TYPE_LPDDR5:
|
||||
return RRA_MEMORY_TYPE_LPDDR5;
|
||||
default:
|
||||
unreachable("invalid vram type");
|
||||
UNREACHABLE("invalid vram type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ rra_gather_bvh_info_gfx12(const uint8_t *bvh, uint32_t node_id, struct rra_bvh_i
|
|||
dst->leaf_nodes_size += sizeof(struct radv_gfx12_primitive_node);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid node type");
|
||||
UNREACHABLE("Invalid node type");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ radv_tex_wrap(VkSamplerAddressMode address_mode)
|
|||
case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
|
||||
return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
|
||||
default:
|
||||
unreachable("illegal tex wrap mode");
|
||||
UNREACHABLE("illegal tex wrap mode");
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
|
|
@ -59,7 +59,7 @@ radv_tex_compare(VkCompareOp op)
|
|||
case VK_COMPARE_OP_ALWAYS:
|
||||
return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
|
||||
default:
|
||||
unreachable("illegal compare mode");
|
||||
UNREACHABLE("illegal compare mode");
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -255,7 +255,7 @@ radv_sdma_get_tiled_info_dword(const struct radv_device *const device, const str
|
|||
} else if (ver >= SDMA_4_0) {
|
||||
return info | dimension << 9 | surf->u.gfx9.epitch << 16;
|
||||
} else {
|
||||
unreachable("unsupported SDMA version");
|
||||
UNREACHABLE("unsupported SDMA version");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -273,7 +273,7 @@ radv_sdma_get_tiled_header_dword(const struct radv_device *const device, const s
|
|||
const uint32_t mip_id = subresource.mipLevel;
|
||||
return (mip_max - 1) << 20 | mip_id << 24;
|
||||
} else {
|
||||
unreachable("unsupported SDMA version");
|
||||
UNREACHABLE("unsupported SDMA version");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -781,7 +781,7 @@ radv_lower_ngg(struct radv_device *device, struct radv_shader_stage *ngg_stage,
|
|||
else
|
||||
assert(nir->info.mesh.primitive_type == MESA_PRIM_TRIANGLES);
|
||||
} else {
|
||||
unreachable("NGG needs to be VS, TES or GS.");
|
||||
UNREACHABLE("NGG needs to be VS, TES or GS.");
|
||||
}
|
||||
|
||||
ac_nir_lower_ngg_options options = {0};
|
||||
|
|
@ -831,7 +831,7 @@ radv_lower_ngg(struct radv_device *device, struct radv_shader_stage *ngg_stage,
|
|||
gfx_state->has_multiview_view_index, info->ms.has_query);
|
||||
ngg_stage->info.ms.needs_ms_scratch_ring = scratch_ring;
|
||||
} else {
|
||||
unreachable("invalid SW stage passed to radv_lower_ngg");
|
||||
UNREACHABLE("invalid SW stage passed to radv_lower_ngg");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1151,7 +1151,7 @@ radv_free_shader_memory(struct radv_device *device, union radv_shader_arena_bloc
|
|||
free_list = NULL;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid shader arena type");
|
||||
UNREACHABLE("invalid shader arena type");
|
||||
}
|
||||
|
||||
/* merge with previous hole */
|
||||
|
|
@ -1862,7 +1862,7 @@ radv_precompute_registers_pgm(const struct radv_device *device, struct radv_shad
|
|||
info->regs.pgm_rsrc3 = R_00B8A0_COMPUTE_PGM_RSRC3;
|
||||
break;
|
||||
default:
|
||||
unreachable("invalid hw stage");
|
||||
UNREACHABLE("invalid hw stage");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
@ -2162,7 +2162,7 @@ radv_postprocess_binary_config(struct radv_device *device, struct radv_shader_bi
|
|||
|
||||
break;
|
||||
default:
|
||||
unreachable("unsupported shader type");
|
||||
UNREACHABLE("unsupported shader type");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -2187,7 +2187,7 @@ radv_postprocess_binary_config(struct radv_device *device, struct radv_shader_bi
|
|||
} else if (es_stage == MESA_SHADER_MESH) {
|
||||
es_vgpr_comp_cnt = 0;
|
||||
} else {
|
||||
unreachable("Unexpected ES shader stage");
|
||||
UNREACHABLE("Unexpected ES shader stage");
|
||||
}
|
||||
|
||||
if (stage == MESA_SHADER_MESH && pdev->info.mesh_fast_launch_2) {
|
||||
|
|
@ -2251,7 +2251,7 @@ radv_postprocess_binary_config(struct radv_device *device, struct radv_shader_bi
|
|||
} else if (es_stage == MESA_SHADER_TESS_EVAL) {
|
||||
es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
|
||||
} else {
|
||||
unreachable("invalid shader ES type");
|
||||
UNREACHABLE("invalid shader ES type");
|
||||
}
|
||||
|
||||
/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
|
||||
|
|
@ -3696,7 +3696,7 @@ radv_dump_shader_stats(struct radv_device *device, struct radv_pipeline *pipelin
|
|||
fprintf(output, "%f", stats[i].value.f64);
|
||||
break;
|
||||
default:
|
||||
unreachable("Invalid pipeline statistic format");
|
||||
UNREACHABLE("Invalid pipeline statistic format");
|
||||
}
|
||||
fprintf(output, "\n");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -714,7 +714,7 @@ radv_get_rt_priority(gl_shader_stage stage)
|
|||
case MESA_SHADER_CALLABLE:
|
||||
return radv_rt_priority_callable;
|
||||
default:
|
||||
unreachable("Unimplemented RT shader stage.");
|
||||
UNREACHABLE("Unimplemented RT shader stage.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -872,7 +872,7 @@ declare_shader_args(const struct radv_device *device, const struct radv_graphics
|
|||
declare_ps_input_vgprs(info, args);
|
||||
break;
|
||||
default:
|
||||
unreachable("Shader stage not implemented");
|
||||
UNREACHABLE("Shader stage not implemented");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1032,7 +1032,7 @@ radv_get_user_data_0(const struct radv_device *device, struct radv_shader_info *
|
|||
case MESA_SHADER_ANY_HIT:
|
||||
return R_00B900_COMPUTE_USER_DATA_0;
|
||||
default:
|
||||
unreachable("invalid shader stage");
|
||||
UNREACHABLE("invalid shader stage");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1622,6 +1622,6 @@ radv_select_hw_stage(const struct radv_shader_info *const info, const enum amd_g
|
|||
case MESA_SHADER_CALLABLE:
|
||||
return AC_HW_COMPUTE_SHADER;
|
||||
default:
|
||||
unreachable("Unsupported HW stage");
|
||||
UNREACHABLE("Unsupported HW stage");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ radv_shader_stage_init(const VkShaderCreateInfoEXT *sinfo, struct radv_shader_st
|
|||
else if (subgroup_size->requiredSubgroupSize == 64)
|
||||
out_stage->key.subgroup_required_size = RADV_REQUIRED_WAVE64;
|
||||
else
|
||||
unreachable("Unsupported required subgroup size.");
|
||||
UNREACHABLE("Unsupported required subgroup size.");
|
||||
}
|
||||
|
||||
if (sinfo->flags & VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ radv_ip_to_queue_family(enum amd_ip_type t)
|
|||
case AMD_IP_SDMA:
|
||||
return RADV_QUEUE_TRANSFER;
|
||||
default:
|
||||
unreachable("Unknown IP type");
|
||||
UNREACHABLE("Unknown IP type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -544,7 +544,7 @@ radv_begin_sqtt(struct radv_queue *queue)
|
|||
radeon_emit(0);
|
||||
break;
|
||||
default:
|
||||
unreachable("Incorrect queue family");
|
||||
UNREACHABLE("Incorrect queue family");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -621,7 +621,7 @@ radv_end_sqtt(struct radv_queue *queue)
|
|||
radeon_emit(0);
|
||||
break;
|
||||
default:
|
||||
unreachable("Incorrect queue family");
|
||||
UNREACHABLE("Incorrect queue family");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -854,7 +854,7 @@ radv_GetPhysicalDeviceVideoCapabilitiesKHR(VkPhysicalDevice physicalDevice, cons
|
|||
break;
|
||||
#endif
|
||||
default:
|
||||
unreachable("unsupported operation");
|
||||
UNREACHABLE("unsupported operation");
|
||||
}
|
||||
|
||||
if (cap && !cap->valid)
|
||||
|
|
@ -2558,7 +2558,7 @@ rvcn_dec_message_decode(struct radv_cmd_buffer *cmd_buffer, struct radv_video_se
|
|||
break;
|
||||
}
|
||||
default:
|
||||
unreachable("unknown operation");
|
||||
UNREACHABLE("unknown operation");
|
||||
}
|
||||
|
||||
header->total_size += index_codec->size;
|
||||
|
|
@ -3311,7 +3311,7 @@ radv_vcn_decode_video(struct radv_cmd_buffer *cmd_buffer, const VkVideoDecodeInf
|
|||
size += sizeof(rvcn_dec_message_vp9_t);
|
||||
break;
|
||||
default:
|
||||
unreachable("unsupported codec.");
|
||||
UNREACHABLE("unsupported codec.");
|
||||
}
|
||||
|
||||
radv_vid_buffer_upload_alloc(cmd_buffer, FB_BUFFER_SIZE, &fb_offset, &fb_ptr);
|
||||
|
|
|
|||
|
|
@ -2804,7 +2804,7 @@ radv_video_enc_control_video_coding(struct radv_cmd_buffer *cmd_buffer, const Vk
|
|||
}
|
||||
break;
|
||||
default:
|
||||
unreachable("Unsupported\n");
|
||||
UNREACHABLE("Unsupported\n");
|
||||
}
|
||||
|
||||
if (control_info->flags & VK_VIDEO_CODING_CONTROL_RESET_BIT_KHR) {
|
||||
|
|
|
|||
|
|
@ -202,5 +202,5 @@ radv_test::get_pipeline_hash(VkShaderStageFlags stage)
|
|||
return stats[i].value.u64;
|
||||
}
|
||||
|
||||
unreachable("Driver pipeline hash not found");
|
||||
UNREACHABLE("Driver pipeline hash not found");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -349,7 +349,7 @@ get_nop_packet(struct radv_amdgpu_cs *cs)
|
|||
case AMDGPU_HW_IP_VCN_ENC:
|
||||
return 0; /* NOPs are illegal in encode, so don't pad */
|
||||
default:
|
||||
unreachable("Unknown IP type");
|
||||
UNREACHABLE("Unknown IP type");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1579,7 +1579,7 @@ radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
|
|||
case RADEON_CTX_PRIORITY_LOW:
|
||||
return AMDGPU_CTX_PRIORITY_LOW;
|
||||
default:
|
||||
unreachable("Invalid context priority");
|
||||
UNREACHABLE("Invalid context priority");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1686,7 +1686,7 @@ radv_to_amdgpu_pstate(enum radeon_ctx_pstate radv_pstate)
|
|||
case RADEON_CTX_PSTATE_PEAK:
|
||||
return AMDGPU_CTX_STABLE_PSTATE_PEAK;
|
||||
default:
|
||||
unreachable("Invalid pstate");
|
||||
UNREACHABLE("Invalid pstate");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue