util: Remove usage of USE_**_ASM macros

Use DETECT_ARCH_X86, DETECT_ARCH_X86_64, DETECT_ARCH_AARCH64, and DETECT_CC_GCC instead.

Signed-off-by: Yonggang Luo <luoyonggang@gmail.com>
Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36635>
This commit is contained in:
Yonggang Luo 2025-08-08 01:01:20 +08:00 committed by Marge Bot
parent 7c1b96faaa
commit b2761d1481
5 changed files with 16 additions and 11 deletions

View file

@@ -134,7 +134,7 @@ void _mesa_print_info( struct gl_context *ctx )
*/
_mesa_debug(NULL, "Mesa GL_EXTENSIONS = %s\n", ctx->Extensions.String);
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
#if DETECT_ARCH_X86
_mesa_debug(NULL, "Mesa x86-optimized: YES\n");
#else
_mesa_debug(NULL, "Mesa x86-optimized: NO\n");

View file

@@ -42,7 +42,9 @@
#include <popcntintrin.h>
#endif
#include "macros.h"
#include "util/detect_arch.h"
#include "util/detect_cc.h"
#include "util/macros.h"
#ifdef __cplusplus
extern "C" {
@@ -363,7 +365,7 @@ util_bitcount(unsigned n)
static inline unsigned
util_popcnt_inline_asm(unsigned n)
{
#if defined(USE_X86_64_ASM) || defined(USE_X86_ASM)
#if (DETECT_ARCH_X86 || DETECT_ARCH_X86_64) && DETECT_CC_GCC
uint32_t out;
__asm volatile("popcnt %1, %0" : "=r"(out) : "r"(n));
return out;

View file

@@ -29,9 +29,11 @@
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "util/detect_arch.h"
#include "util/detect_cc.h"
#include "util/u_cpu_detect.h"
#if defined(USE_X86_64_ASM)
#if DETECT_ARCH_X86_64
#include <xmmintrin.h>
#endif
@@ -57,7 +59,7 @@ uint16_t _mesa_float_to_float16_rtz_slow(float val);
static inline uint16_t
_mesa_float_to_half(float val)
{
#if defined(USE_X86_64_ASM)
#if DETECT_ARCH_X86_64 && DETECT_CC_GCC
if (util_get_cpu_caps()->has_f16c) {
__m128 in = {val};
__m128i out;
@@ -73,7 +75,7 @@ _mesa_float_to_half(float val)
static inline float
_mesa_half_to_float(uint16_t val)
{
#if defined(USE_X86_64_ASM)
#if DETECT_ARCH_X86_64 && DETECT_CC_GCC
if (util_get_cpu_caps()->has_f16c) {
__m128i in = {val};
__m128 out;
@@ -81,7 +83,7 @@ _mesa_half_to_float(uint16_t val)
__asm volatile("vcvtph2ps %1, %0" : "=v"(out) : "v"(in));
return out[0];
}
#elif defined(USE_AARCH64_ASM)
#elif DETECT_ARCH_AARCH64
float result;
uint16_t in = val;
@@ -98,7 +100,7 @@ _mesa_half_to_float(uint16_t val)
static inline uint16_t
_mesa_float_to_float16_rtz(float val)
{
#if defined(USE_X86_64_ASM)
#if DETECT_ARCH_X86_64 && DETECT_CC_GCC
if (util_get_cpu_caps()->has_f16c) {
__m128 in = {val};
__m128i out;

View file

@@ -26,6 +26,7 @@
*
*/
#include "util/detect_arch.h"
#include "util/streaming-load-memcpy.h"
#include "util/u_cpu_detect.h"
#include "util/u_math.h"
@@ -42,7 +43,7 @@ util_streaming_load_memcpy(void *restrict dst, void *restrict src, size_t len)
char *restrict d = dst;
char *restrict s = src;
#if defined(USE_SSE41) || defined(USE_AARCH64_ASM)
#if defined(USE_SSE41) || DETECT_ARCH_AARCH64
/* If dst and src are not co-aligned, or if non-temporal load instructions
* are not present, fallback to memcpy(). */
if (((uintptr_t)d & 15) != ((uintptr_t)s & 15)
@@ -91,7 +92,7 @@ util_streaming_load_memcpy(void *restrict dst, void *restrict src, size_t len)
len -= 64;
}
#elif defined(USE_AARCH64_ASM)
#elif DETECT_ARCH_AARCH64
if (len >= 64) {
__asm__ volatile(
/* Memory barrier for loads completion in the non-shareable domain:

View file

@@ -130,7 +130,7 @@ util_fast_log2(float x)
static inline int
util_ifloor(float f)
{
#if defined(USE_X86_ASM) && defined(__GNUC__) && defined(__i386__)
#if DETECT_ARCH_X86 && DETECT_CC_GCC
/*
* IEEE floor for computers that round to nearest or even.
* 'f' must be between -4194304 and 4194303.