[PATCH] AArch64: Check kernel version for SVE ifuncs

Szabolcs Nagy szabolcs.nagy@arm.com
Wed Mar 13 18:39:43 GMT 2024


The 03/13/2024 14:31, Wilco Dijkstra wrote:
> 
> Older Linux kernels may disable SVE after certain syscalls.  Calling the
> SVE-optimized memcpy afterwards will then cause a trap to reenable SVE.
> As a result, applications with a high use of syscalls may run slower with
> the SVE memcpy.  Avoid this by checking the kernel version and enable the
> SVE-optimized memcpy/memmove ifuncs only on Linux kernel 6.2 or newer.

a kernel version check at startup is a bit ugly.

i think we should use the aarch64 kernel-features.h to define an
__ASSUME_FAST_SVE or similar based on the __LINUX_KERNEL_VERSION
(so once we raise the minimum kernel version to 6.2 the startup check
is no longer done, and users can configure with --enable-kernel=6.2.0).

(fast_sve may not be the best name, 'no_excessive_sve_traps' or
'sve_stays_enabled' may be better, but i'm not clear on the exact
kernel behaviour we want here, so i'll let you decide)

> 
> Passes regress, OK for commit?
> 

patch looks ok otherwise.

> ---
> 
> diff --git a/sysdeps/aarch64/cpu-features.h b/sysdeps/aarch64/cpu-features.h
> index 77a782422af1b6e4b2af32bfebfda37874111510..5f2da91ebbd0adafb0d84ec503b0f902f566da5a 100644
> --- a/sysdeps/aarch64/cpu-features.h
> +++ b/sysdeps/aarch64/cpu-features.h
> @@ -71,6 +71,7 @@ struct cpu_features
>    /* Currently, the GLIBC memory tagging tunable only defines 8 bits.  */
>    uint8_t mte_state;
>    bool sve;
> +  bool prefer_sve_ifuncs;
>    bool mops;
>  };
>  
> diff --git a/sysdeps/aarch64/multiarch/init-arch.h b/sysdeps/aarch64/multiarch/init-arch.h
> index c52860efb22d70eb4bdf356781f51c7de8ec67dc..61dc40088f4d9e5e06b57bdc7d54bde1e2a686a4 100644
> --- a/sysdeps/aarch64/multiarch/init-arch.h
> +++ b/sysdeps/aarch64/multiarch/init-arch.h
> @@ -36,5 +36,7 @@
>      MTE_ENABLED ();							      \
>    bool __attribute__((unused)) sve =					      \
>      GLRO(dl_aarch64_cpu_features).sve;					      \
> +  bool __attribute__((unused)) prefer_sve_ifuncs =			      \
> +    GLRO(dl_aarch64_cpu_features).prefer_sve_ifuncs;			      \
>    bool __attribute__((unused)) mops =					      \
>      GLRO(dl_aarch64_cpu_features).mops;
> diff --git a/sysdeps/aarch64/multiarch/memcpy.c b/sysdeps/aarch64/multiarch/memcpy.c
> index d12eccfca51f4bcfef6ccf5aa286edb301e361ac..ce53567dab33c2f00b89b4069235abd4651811a6 100644
> --- a/sysdeps/aarch64/multiarch/memcpy.c
> +++ b/sysdeps/aarch64/multiarch/memcpy.c
> @@ -47,7 +47,7 @@ select_memcpy_ifunc (void)
>      {
>        if (IS_A64FX (midr))
>  	return __memcpy_a64fx;
> -      return __memcpy_sve;
> +      return prefer_sve_ifuncs ? __memcpy_sve : __memcpy_generic;
>      }
>  
>    if (IS_THUNDERX (midr))
> diff --git a/sysdeps/aarch64/multiarch/memmove.c b/sysdeps/aarch64/multiarch/memmove.c
> index 2081eeb4d40e0240e67a7b26b64576f44eaf18e3..fe95037be391896c7670ef606bf4d3ba7dfb6a00 100644
> --- a/sysdeps/aarch64/multiarch/memmove.c
> +++ b/sysdeps/aarch64/multiarch/memmove.c
> @@ -47,7 +47,7 @@ select_memmove_ifunc (void)
>      {
>        if (IS_A64FX (midr))
>  	return __memmove_a64fx;
> -      return __memmove_sve;
> +      return prefer_sve_ifuncs ? __memmove_sve : __memmove_generic;
>      }
>  
>    if (IS_THUNDERX (midr))
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> index b1a3f673f067280bdacfddd92723a81e418023e5..13b02c45df80b493516b3c9d4acbbbffaa47af92 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> @@ -21,6 +21,7 @@
>  #include <sys/auxv.h>
>  #include <elf/dl-hwcaps.h>
>  #include <sys/prctl.h>
> +#include <sys/utsname.h>
>  #include <dl-tunables-parse.h>
>  
>  #define DCZID_DZP_MASK (1 << 4)
> @@ -62,6 +63,41 @@ get_midr_from_mcpu (const struct tunable_str_t *mcpu)
>    return UINT64_MAX;
>  }
>  
> +/* Parse kernel version without calling any library functions.
> +   Allow 2 digits for kernel version and 3 digits for major version,
> +   separated by '.': "kk.mmm.".

looks reasonable.

> +   Return kernel version * 1000 + major version, or -1 on failure.  */
> +
> +static inline int
> +kernel_version (void)
> +{
> +  struct utsname buf;
> +  const char *p = &buf.release[0];
> +  int kernel = 0;
> +  int major = 0;
> +
> +  if (__uname (&buf) < 0)
> +    return -1;
> +
> +  if (*p >= '0' && *p <= '9')
> +    kernel = (kernel * 10) + *p++ - '0';
> +  if (*p >= '0' && *p <= '9')
> +    kernel = (kernel * 10) + *p++ - '0';
> +  if (*p != '.')
> +    return -1;
> +  p++;
> +  if (*p >= '0' && *p <= '9')
> +    major = (major * 10) + *p++ - '0';
> +  if (*p >= '0' && *p <= '9')
> +    major = (major * 10) + *p++ - '0';
> +  if (*p >= '0' && *p <= '9')
> +    major = (major * 10) + *p++ - '0';
> +  if (*p != '.' && *p != '\0')
> +    return -1;
> +
> +  return kernel * 1000 + major;
> +}
> +
>  static inline void
>  init_cpu_features (struct cpu_features *cpu_features)
>  {
> @@ -126,6 +162,10 @@ init_cpu_features (struct cpu_features *cpu_features)
>    /* Check if SVE is supported.  */
>    cpu_features->sve = GLRO (dl_hwcap) & HWCAP_SVE;
>  
> +  /* Prefer using SVE in string ifuncs from Linux 6.2 onwards.  */
> +  cpu_features->prefer_sve_ifuncs =
> +    cpu_features->sve && kernel_version () >= 6002;

e.g. this can be '&& fast_sve ()', with fast_sve defined based on
__ASSUME_FAST_SVE.

> +
>    /* Check if MOPS is supported.  */
>    cpu_features->mops = GLRO (dl_hwcap2) & HWCAP2_MOPS;
>  }
> 


More information about the Libc-alpha mailing list