[PATCH 1/6] configure.ac: deduplicate intrinsics test code

Jussi Kivilinna <jussi.kivilinna at iki.fi>
Thu Aug 7 15:28:50 CEST 2025


* configure.ac (gcry_cv_cc_aarch64_neon_intrinsics)
(gcry_cv_cc_aarch64_neon_intrinsics_cflags): Move test source code
to new macro GCRY_AARCH64_NEON_INTRINSICS_TEST.
(gcry_cv_cc_ppc_altivec, gcry_cv_cc_ppc_altivec_cflags):
Move test source code to new macro GCRY_POWERPC_VECTOR_INTRINSICS_TEST.
--

Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
 configure.ac | 164 ++++++++++++++++++++++-----------------------------
 1 file changed, 70 insertions(+), 94 deletions(-)
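
Note (not part of the commit message): the deduplication boils down to defining
each compile-test program once with m4_define and reusing that macro from both
AC_COMPILE_IFELSE call sites (the plain check and the retry-with-extra-CFLAGS
check). A minimal sketch of the pattern, using a hypothetical
EXAMPLE_INTRINSICS_TEST macro and a trivial test body rather than the real
NEON/AltiVec sources in the hunks below:

  # Define the shared test program once.
  m4_define([EXAMPLE_INTRINSICS_TEST],
    [AC_LANG_SOURCE([[int fn(int a) { return a + 1; }]])])

  # First cache check compiles the shared test program as-is ...
  AC_COMPILE_IFELSE([EXAMPLE_INTRINSICS_TEST],
                    [example_intrinsics=yes], [example_intrinsics=no])

  # ... and the check repeated with extra CFLAGS reuses the same macro,
  # so the two copies of the test source can no longer drift apart.
  AC_COMPILE_IFELSE([EXAMPLE_INTRINSICS_TEST],
                    [example_intrinsics_cflags=yes],
                    [example_intrinsics_cflags=no])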

diff --git a/configure.ac b/configure.ac
index c8d9b4a3..3ce405e9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2235,6 +2235,37 @@ fi
 #
 # Check whether compiler supports AArch64/NEON/crypto intrinsics
 #
+m4_define([GCRY_AARCH64_NEON_INTRINSICS_TEST],
+  [AC_LANG_SOURCE(
+    [[#include <arm_neon.h>
+      #define __m128i uint64x2_t
+      #define vpsrldq128(s, a, o) \
+	({ uint64x2_t __tmp = { 0, 0 }; \
+	    o = (__m128i)vextq_u8((uint8x16_t)a, \
+				  (uint8x16_t)__tmp, (s) & 15); })
+      #define vaesenclast128(a, b, o) \
+	(o = (__m128i)vaeseq_u8((uint8x16_t)b, (uint8x16_t)a))
+      #define memory_barrier_with_vec(a) __asm__("" : "+w"(a) :: "memory")
+      static inline __attribute__((always_inline)) __m128i
+      fn2(__m128i a)
+      {
+	vpsrldq128(2, a, a);
+	return a;
+      }
+      __m128i fn(__m128i in)
+      {
+	__m128i x;
+	memory_barrier_with_vec(in);
+	x = fn2(in);
+	memory_barrier_with_vec(x);
+	vaesenclast128(in, x, in);
+	memory_barrier_with_vec(in);
+	return in;
+      }
+    ]]
+  )]
+)
+
 AC_CACHE_CHECK([whether compiler supports AArch64/NEON/crypto intrinsics],
       [gcry_cv_cc_aarch64_neon_intrinsics],
       [if test "$mpi_cpu_arch" != "aarch64" ||
@@ -2242,34 +2273,9 @@ AC_CACHE_CHECK([whether compiler supports AArch64/NEON/crypto intrinsics],
 	gcry_cv_cc_aarch64_neon_intrinsics="n/a"
       else
 	gcry_cv_cc_aarch64_neon_intrinsics=no
-	AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-	[[#include <arm_neon.h>
-	  #define __m128i uint64x2_t
-	  #define vpsrldq128(s, a, o) \
-	    ({ uint64x2_t __tmp = { 0, 0 }; \
-		o = (__m128i)vextq_u8((uint8x16_t)a, \
-				      (uint8x16_t)__tmp, (s) & 15); })
-	  #define vaesenclast128(a, b, o) \
-	    (o = (__m128i)vaeseq_u8((uint8x16_t)b, (uint8x16_t)a))
-	  #define memory_barrier_with_vec(a) __asm__("" : "+w"(a) :: "memory")
-	  static inline __attribute__((always_inline)) __m128i
-	  fn2(__m128i a)
-	  {
-	    vpsrldq128(2, a, a);
-	    return a;
-	  }
-	  __m128i fn(__m128i in)
-	  {
-	    __m128i x;
-	    memory_barrier_with_vec(in);
-	    x = fn2(in);
-	    memory_barrier_with_vec(x);
-	    vaesenclast128(in, x, in);
-	    memory_barrier_with_vec(in);
-	    return in;
-	  }
-	  ]])],
-	[gcry_cv_cc_aarch64_neon_intrinsics=yes])
+	AC_COMPILE_IFELSE(
+	  [GCRY_AARCH64_NEON_INTRINSICS_TEST],
+	  [gcry_cv_cc_aarch64_neon_intrinsics=yes])
       fi])
 if test "$gcry_cv_cc_aarch64_neon_intrinsics" = "yes" ; then
     AC_DEFINE(HAVE_COMPATIBLE_CC_AARCH64_NEON_INTRINSICS,1,
@@ -2284,35 +2290,12 @@ if test "$gcry_cv_cc_aarch64_neon_intrinsics" = "no" &&
    test "$try_asm_modules" = "yes" ; then
   AC_CACHE_CHECK([whether compiler supports AArch64/NEON/crypto intrinsics with extra GCC flags],
     [gcry_cv_cc_aarch64_neon_intrinsics_cflags],
-    [gcry_cv_cc_aarch64_neon_intrinsics_cflags=no
-    AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-      [[#include <arm_neon.h>
-	#define __m128i uint64x2_t
-	#define vpsrldq128(s, a, o) \
-	  ({ uint64x2_t __tmp = { 0, 0 }; \
-	      o = (__m128i)vextq_u8((uint8x16_t)a, \
-				    (uint8x16_t)__tmp, (s) & 15); })
-	#define vaesenclast128(a, b, o) \
-	  (o = (__m128i)vaeseq_u8((uint8x16_t)b, (uint8x16_t)a))
-	#define memory_barrier_with_vec(a) __asm__("" : "+w"(a) :: "memory")
-	static inline __attribute__((always_inline)) __m128i
-	fn2(__m128i a)
-	{
-	  vpsrldq128(2, a, a);
-	  return a;
-	}
-	__m128i fn(__m128i in)
-	{
-	  __m128i x;
-	  memory_barrier_with_vec(in);
-	  x = fn2(in);
-	  memory_barrier_with_vec(x);
-	  vaesenclast128(in, x, in);
-	  memory_barrier_with_vec(in);
-	  return in;
-	}
-	]])],
-      [gcry_cv_cc_aarch64_neon_intrinsics_cflags=yes])])
+    [
+      gcry_cv_cc_aarch64_neon_intrinsics_cflags=no
+      AC_COMPILE_IFELSE(
+	[GCRY_AARCH64_NEON_INTRINSICS_TEST],
+	[gcry_cv_cc_aarch64_neon_intrinsics_cflags=yes])
+    ])
   if test "$gcry_cv_cc_aarch64_neon_intrinsics_cflags" = "yes" ; then
     AC_DEFINE(HAVE_COMPATIBLE_CC_AARCH64_NEON_INTRINSICS,1,
 	      [Defined if underlying compiler supports AArch64/NEON/crypto intrinsics])
@@ -2331,6 +2314,27 @@ CFLAGS=$_gcc_cflags_save;
 #
 # Check whether compiler supports PowerPC AltiVec/VSX intrinsics
 #
+m4_define([GCRY_POWERPC_VECTOR_INTRINSICS_TEST],
+  [AC_LANG_SOURCE(
+    [[#include <altivec.h>
+      typedef vector unsigned char block;
+      typedef vector unsigned int vecu32;
+      static inline __attribute__((always_inline)) vecu32
+      vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx)
+      {
+	return vec_sld (a, b, (4 * idx) & 15);
+      }
+      block fn(block in)
+      {
+	block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
+	vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
+	y = vec_sld_u32 (y, y, 3);
+	return vec_cipher_be (t, in) ^ (block)y;
+      }
+    ]]
+  )]
+)
+
 AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics],
       [gcry_cv_cc_ppc_altivec],
       [if test "$mpi_cpu_arch" != "ppc" ||
@@ -2338,24 +2342,9 @@ AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics]
 	gcry_cv_cc_ppc_altivec="n/a"
       else
 	gcry_cv_cc_ppc_altivec=no
-	AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-	[[#include <altivec.h>
-	  typedef vector unsigned char block;
-	  typedef vector unsigned int vecu32;
-	  static inline __attribute__((always_inline)) vecu32
-	  vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx)
-	  {
-	    return vec_sld (a, b, (4 * idx) & 15);
-	  }
-	  block fn(block in)
-	  {
-	    block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
-	    vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
-	    y = vec_sld_u32 (y, y, 3);
-	    return vec_cipher_be (t, in) ^ (block)y;
-	  }
-	  ]])],
-	[gcry_cv_cc_ppc_altivec=yes])
+	AC_COMPILE_IFELSE(
+	  [GCRY_POWERPC_VECTOR_INTRINSICS_TEST],
+	  [gcry_cv_cc_ppc_altivec=yes])
       fi])
 if test "$gcry_cv_cc_ppc_altivec" = "yes" ; then
     AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
@@ -2370,25 +2359,12 @@ if test "$gcry_cv_cc_ppc_altivec" = "no" &&
    test "$try_asm_modules" = "yes" ; then
   AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags],
     [gcry_cv_cc_ppc_altivec_cflags],
-    [gcry_cv_cc_ppc_altivec_cflags=no
-    AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-      [[#include <altivec.h>
-	typedef vector unsigned char block;
-	typedef vector unsigned int vecu32;
-	static inline __attribute__((always_inline)) vecu32
-	vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx)
-	{
-	  return vec_sld (a, b, (4 * idx) & 15);
-	}
-	block fn(block in)
-	{
-	  block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
-	  vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
-	  y = vec_sld_u32 (y, y, 3);
-	  return vec_cipher_be (t, in) ^ (block)y;
-	}
-	]])],
-      [gcry_cv_cc_ppc_altivec_cflags=yes])])
+    [
+      gcry_cv_cc_ppc_altivec_cflags=no
+      AC_COMPILE_IFELSE(
+	[GCRY_POWERPC_VECTOR_INTRINSICS_TEST],
+	[gcry_cv_cc_ppc_altivec_cflags=yes])
+    ])
   if test "$gcry_cv_cc_ppc_altivec_cflags" = "yes" ; then
     AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
 	      [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics])
-- 
2.48.1