x86: Add support for building {w}memset{_chk} with explicit ISA level
author     Noah Goldstein <goldstein.w.n@gmail.com>
           Wed, 29 Jun 2022 23:07:06 +0000 (16:07 -0700)
committer  Noah Goldstein <goldstein.w.n@gmail.com>
           Tue, 5 Jul 2022 23:42:42 +0000 (16:42 -0700)
1. Refactor files so that all implementations are in the multiarch
   directory
    - Moved the implementation portion of the SSE2 memset from memset.S
      to multiarch/memset-sse2-unaligned-erms.S

    - The non-multiarch file now includes only one of the
      implementations from the multiarch directory, chosen by the
      compiled ISA level (this path is used only for non-multiarch
      builds; otherwise we go through the ifunc selector).  See the
      sketch below.
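
    A rough sketch of the dispatch in isa-default-impl.h (inferred
    from the DEFAULT_IMPL_V* usage in memset.S below; not the header's
    verbatim contents): pick the best implementation defined at or
    below the compiled ISA level, falling back to a lower level when a
    given one is not defined.

        /* Hypothetical sketch of <isa-default-impl.h>.  An ISA V2
           build of memset defines no DEFAULT_IMPL_V2, so it falls
           back to the SSE2 (V1) file.  */
        #if MINIMUM_X86_ISA_LEVEL >= 4 && defined DEFAULT_IMPL_V4
        # define ISA_DEFAULT_IMPL DEFAULT_IMPL_V4
        #elif MINIMUM_X86_ISA_LEVEL >= 3 && defined DEFAULT_IMPL_V3
        # define ISA_DEFAULT_IMPL DEFAULT_IMPL_V3
        #elif MINIMUM_X86_ISA_LEVEL >= 2 && defined DEFAULT_IMPL_V2
        # define ISA_DEFAULT_IMPL DEFAULT_IMPL_V2
        #else
        # define ISA_DEFAULT_IMPL DEFAULT_IMPL_V1
        #endif

        #include ISA_DEFAULT_IMPL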

2. Add ISA level build guards to the different implementations.
    - E.g. memset-avx2-unaligned-erms.S, which is ISA level 3, will
      only be built if the compiled ISA level is <= 3.  Otherwise
      there is no reason to include it, as we will always use one of
      the ISA level 4 implementations instead
      (memset-evex-unaligned-erms.S).  The guard is sketched below.
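
    The guard's intended semantics, as a sketch (assumed; the real
    macro in <isa-level.h> also has to handle rtld and non-multiarch
    builds):

        /* Hypothetical sketch: build an implementation written for
           ISA level N only when the compiled baseline does not
           already guarantee a higher-level replacement.  */
        #define ISA_SHOULD_BUILD(isa_build_level) \
          (MINIMUM_X86_ISA_LEVEL <= (isa_build_level))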

3. Add a new multiarch/rtld-memset.S that just includes the
   non-multiarch memset.S, which will in turn select the best
   implementation based on the compiled ISA level.

4. Refactor the ifunc selector and ifunc implementation list to use
   the ISA level aware wrapper macros, which allow functions below the
   compiled ISA level (with a guaranteed replacement) to be skipped;
   see the sketch below.
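
   The intended effect of the wrappers, shown as a hypothetical sketch
   (not the real definitions): each X86_IFUNC_IMPL_ADD_Vn entry
   survives only while a level-n implementation can still be selected,
   and the X86_ISA_CPU_FEATURE_USABLE_P checks in the selector
   presumably become compile-time constants once the baseline
   guarantees the feature, so dead branches fold away.

       /* Sketch for level 3; V2 and V4 would be analogous.  */
       #if MINIMUM_X86_ISA_LEVEL <= 3
       # define X86_IFUNC_IMPL_ADD_V3(...) IFUNC_IMPL_ADD (__VA_ARGS__)
       #else
       /* Baseline is x86-64-v4: an ISA level 4 implementation is
          always chosen instead, so drop the entry entirely.  */
       # define X86_IFUNC_IMPL_ADD_V3(...)
       #endif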

Tested with and without multiarch on x86_64 for ISA levels:
{generic, x86-64-v2, x86-64-v3, x86-64-v4}

And m32 with and without multiarch.

sysdeps/x86_64/memset.S
sysdeps/x86_64/multiarch/ifunc-impl-list.c
sysdeps/x86_64/multiarch/ifunc-memset.h
sysdeps/x86_64/multiarch/ifunc-wmemset.h
sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
sysdeps/x86_64/multiarch/rtld-memset.S [new file with mode: 0644]

diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S
index a6eea61a4d04dd5928134983e6aca327f0d0878d..f4e1bab601cbe66024a18e03e0a0bff4eaacb79d 100644
--- a/sysdeps/x86_64/memset.S
+++ b/sysdeps/x86_64/memset.S
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
-#define USE_WITH_SSE2  1
 
-#define VEC_SIZE       16
-#define MOV_SIZE       3
-#define RET_SIZE       1
+#define MEMSET_SYMBOL(p,s)     memset
+#define MEMSET_CHK_SYMBOL(p,s) p
 
-#define VEC(i)         xmm##i
-#define VMOVU     movups
-#define VMOVA     movaps
+#define WMEMSET_SYMBOL(p,s)    __wmemset
+#define WMEMSET_CHK_SYMBOL(p,s) p
 
-# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  movd d, %xmm0; \
-  movq r, %rax; \
-  punpcklbw %xmm0, %xmm0; \
-  punpcklwd %xmm0, %xmm0; \
-  pshufd $0, %xmm0, %xmm0
+#define DEFAULT_IMPL_V1        "multiarch/memset-sse2-unaligned-erms.S"
+#define DEFAULT_IMPL_V3        "multiarch/memset-avx2-unaligned-erms.S"
+#define DEFAULT_IMPL_V4        "multiarch/memset-evex-unaligned-erms.S"
 
-# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  movd d, %xmm0; \
-  pshufd $0, %xmm0, %xmm0; \
-  movq r, %rax
-
-# define MEMSET_VDUP_TO_VEC0_HIGH()
-# define MEMSET_VDUP_TO_VEC0_LOW()
-
-# define WMEMSET_VDUP_TO_VEC0_HIGH()
-# define WMEMSET_VDUP_TO_VEC0_LOW()
-
-#define SECTION(p)             p
-
-#ifndef MEMSET_SYMBOL
-# define MEMSET_CHK_SYMBOL(p,s)        p
-# define MEMSET_SYMBOL(p,s)    memset
-#endif
-
-#ifndef WMEMSET_SYMBOL
-# define WMEMSET_CHK_SYMBOL(p,s) p
-# define WMEMSET_SYMBOL(p,s)   __wmemset
-#endif
-
-#include "multiarch/memset-vec-unaligned-erms.S"
+#include "isa-default-impl.h"
 
 libc_hidden_builtin_def (memset)
 
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 7858aa316f26f5bba2f1a5e15f42e70d6f1b55ca..21008c72b4bb18c6058c422010e31303f7615f42 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -213,94 +213,99 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, __memset_chk,
              IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
                              __memset_chk_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-                             __memset_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-                             __memset_chk_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_chk_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_chk_avx2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_chk_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_chk_avx2_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memset_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memset_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_chk_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_chk_avx2_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_chk_avx2_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_chk_avx2_unaligned_erms_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+                                    __memset_chk_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+                                    __memset_chk_sse2_unaligned_erms)
              )
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memset.c.  */
   IFUNC_IMPL (i, name, memset,
              IFUNC_IMPL_ADD (array, i, memset, 1,
-                             __memset_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset, 1,
-                             __memset_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset, 1, __memset_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_avx2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_avx2_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memset_avx512_no_vzeroupper)
+                             __memset_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memset_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_avx2_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_avx2_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_avx2_unaligned_erms_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+                                    __memset_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+                                    __memset_sse2_unaligned_erms)
             )
 
   /* Support sysdeps/x86_64/multiarch/rawmemchr.c.  */
@@ -821,25 +826,27 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/wmemset.c.  */
   IFUNC_IMPL (i, name, wmemset,
-             IFUNC_IMPL_ADD (array, i, wmemset, 1,
-                             __wmemset_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __wmemset_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __wmemset_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_avx512_unaligned))
+             X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __wmemset_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __wmemset_avx2_unaligned_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, wmemset, 1,
+                                    __wmemset_sse2_unaligned))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
@@ -1049,25 +1056,27 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/wmemset_chk.c.  */
   IFUNC_IMPL (i, name, __wmemset_chk,
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk, 1,
-                             __wmemset_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __wmemset_chk_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __wmemset_chk_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_chk_avx512_unaligned))
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __wmemset_chk_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __wmemset_chk_avx2_unaligned_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __wmemset_chk, 1,
+                                    __wmemset_chk_sse2_unaligned))
 #endif
 
   return 0;
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 64d179913c0dda7ec2d82e652d2c53bc138767e7..ed514976aa443e89f733172a6130b35a01461fdb 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
 #include <init-arch.h>
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+  attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
   attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
   attribute_hidden;
@@ -31,31 +40,26 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms_rtm)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
   attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx512_unaligned_erms);
@@ -66,11 +70,11 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx512_no_vzeroupper);
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (evex_unaligned_erms);
@@ -86,7 +90,8 @@ IFUNC_SELECTOR (void)
          return OPTIMIZE (avx2_unaligned_rtm);
        }
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                      Prefer_No_VZEROUPPER, !))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx2_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
index 87c48e2387f5212724d36b8cc5bb59828b721b53..3810c719c612d782e2a2bca2f3b39e7c1d931da3 100644
--- a/sysdeps/x86_64/multiarch/ifunc-wmemset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
 
 #include <init-arch.h>
 
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2)
+      && X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                     AVX_Fast_Unaligned_Load, !))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
        {
          if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
            return OPTIMIZE (avx512_unaligned);
@@ -44,7 +48,8 @@ IFUNC_SELECTOR (void)
       if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
        return OPTIMIZE (avx2_unaligned_rtm);
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                      Prefer_No_VZEROUPPER, !))
        return OPTIMIZE (avx2_unaligned);
     }
 
diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
index c0bf2875d03d51ab6c0c759bbde876c0afc6bc0f..a9054a91220f52d2a2f945aebe0a202705a32266 100644
--- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (3)
+
 # define USE_WITH_AVX2 1
 
 # define VEC_SIZE      32
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
index c5be8f57ef41c6b8a296ee04026e3520dd66b025..8cc9c16d73e34ba109dae848550499fc2e691248 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
 
-#if IS_IN (libc)
 
 #include "asm-syntax.h"
 #ifndef MEMSET
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 5241216a77bf72b714f342bef0919fb87a07bfbd..47623b8ee84fd23800077850beebca50d42eb6f0 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_AVX512       1
 
 # define VEC_SIZE      64
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)            p##.evex512
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)    p##_avx512_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)   p##_avx512_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE       1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index 637002150659123c7fce8612e03eb6b041cfba05..ac4b2d2d5085948d571f3fa1d1695a24f18e4f5e 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_EVEX 1
 
 # define VEC_SIZE      32
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)            p##.evex
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)    p##_evex_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)   p##_evex_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE       1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
index 3d92f6993a579d11cdd4782dade46257dfb93d71..44f9b8888b4341fcc376fdbadfd4cf7bea2b4574 100644
--- a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <sysdep.h>
-#include <shlib-compat.h>
+#include <isa-level.h>
 
-#if IS_IN (libc)
-# define MEMSET_SYMBOL(p,s)    p##_sse2_##s
-# define WMEMSET_SYMBOL(p,s)   p##_sse2_##s
+/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
+   so we need this to build for ISA V2 builds. */
+#if ISA_SHOULD_BUILD (2)
 
-# ifdef SHARED
-#  undef libc_hidden_builtin_def
-#  define libc_hidden_builtin_def(name)
+# include <sysdep.h>
+# define USE_WITH_SSE2 1
+
+# define VEC_SIZE      16
+# define MOV_SIZE      3
+# define RET_SIZE      1
+
+# define VEC(i)                xmm##i
+# define VMOVU     movups
+# define VMOVA     movaps
+
+# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+  movd d, %xmm0; \
+  movq r, %rax; \
+  punpcklbw %xmm0, %xmm0; \
+  punpcklwd %xmm0, %xmm0; \
+  pshufd $0, %xmm0, %xmm0
+
+# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+  movd d, %xmm0; \
+  pshufd $0, %xmm0, %xmm0; \
+  movq r, %rax
+
+# define MEMSET_VDUP_TO_VEC0_HIGH()
+# define MEMSET_VDUP_TO_VEC0_LOW()
+
+# define WMEMSET_VDUP_TO_VEC0_HIGH()
+# define WMEMSET_VDUP_TO_VEC0_LOW()
+
+# define SECTION(p)            p
+
+# ifndef MEMSET_SYMBOL
+#  define MEMSET_SYMBOL(p,s)   p##_sse2_##s
 # endif
 
-# undef weak_alias
-# define weak_alias(original, alias)
-# undef strong_alias
-# define strong_alias(ignored1, ignored2)
-#endif
+# ifndef WMEMSET_SYMBOL
+#  define WMEMSET_SYMBOL(p,s)  p##_sse2_##s
+# endif
+
+# include "memset-vec-unaligned-erms.S"
 
-#include <sysdeps/x86_64/memset.S>
+#endif
diff --git a/sysdeps/x86_64/multiarch/rtld-memset.S b/sysdeps/x86_64/multiarch/rtld-memset.S
new file mode 100644
index 0000000..d912bfa
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-memset.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "../memset.S"