x86: Add support for building {w}memmove{_chk} with explicit ISA level
author    Noah Goldstein <goldstein.w.n@gmail.com>
          Tue, 5 Jul 2022 19:41:07 +0000 (12:41 -0700)
committer Noah Goldstein <goldstein.w.n@gmail.com>
          Tue, 5 Jul 2022 23:42:42 +0000 (16:42 -0700)
1. Refactor files so that all implementations are in the multiarch
   directory.
    - Moved the implementation portion of the SSE2 memmove from
      memmove.S to multiarch/memmove-sse2-unaligned-erms.S.

    - The non-multiarch file now only includes one of the
      implementations in the multiarch directory, chosen by the
      compiled ISA level (this file is only used for non-multiarch
      builds; otherwise we go through the ifunc selector).

2. Add ISA level build guards to the different implementations.
    - E.g. memmove-avx-unaligned-erms.S, which is ISA level 3, will
      only be built if the compiled ISA level is <= 3.  Otherwise
      there is no reason to include it, as we will always use one of
      the ISA level 4 implementations
      (memmove-evex-unaligned-erms.S).  See the sketch after this
      list.

3. Add a new multiarch/rtld-memmove.S that just includes the
   non-multiarch memmove.S, which will in turn select the best
   implementation based on the compiled ISA level.

4. Refactor the ifunc selector and ifunc implementation list to use
   the ISA level aware wrapper macros, which allow functions below the
   compiled ISA level (with a guaranteed replacement) to be skipped;
   a sketch of these macros follows the test notes below.
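
As a rough sketch of how items 1 and 2 fit together (condensed from
the diff below; implementation bodies elided):

    /* In multiarch/memmove-avx-unaligned-erms.S: only emit the ISA
       level 3 implementation when a build with compiled ISA level
       <= 3 could still select it at runtime.  */
    #include <isa-level.h>
    #if ISA_SHOULD_BUILD (3)
    /* ... AVX implementation ... */
    #endif

    /* In the non-multiarch memmove.S: name one default per ISA level
       and let isa-default-impl.h include exactly the one matching the
       compiled ISA level.  */
    #define DEFAULT_IMPL_V1 "multiarch/memmove-sse2-unaligned-erms.S"
    #define DEFAULT_IMPL_V3 "multiarch/memmove-avx-unaligned-erms.S"
    #define DEFAULT_IMPL_V4 "multiarch/memmove-evex-unaligned-erms.S"
    #include "isa-default-impl.h"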

Tested with and without multiarch on x86_64 for ISA levels:
{generic, x86-64-v2, x86-64-v3, x86-64-v4}

And m32 with and without multiarch.
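
For item 4, the ISA level aware wrappers behave roughly as in this
simplified sketch (illustrative only; the real macro definitions in
the x86 sysdeps headers may differ in detail).  An entry below the
compiled ISA level expands to nothing, which is safe because a
higher-level replacement is guaranteed to be selected:

    /* List a V3 function only when a build with compiled ISA level
       <= 3 could still select it at runtime.  */
    #if MINIMUM_X86_ISA_LEVEL <= 3
    # define X86_IFUNC_IMPL_ADD_V3(...) IFUNC_IMPL_ADD (__VA_ARGS__)
    #else
    # define X86_IFUNC_IMPL_ADD_V3(...) /* Skipped: V4 replacement.  */
    #endif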

sysdeps/x86_64/memmove.S
sysdeps/x86_64/multiarch/ifunc-impl-list.c
sysdeps/x86_64/multiarch/ifunc-memmove.h
sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
sysdeps/x86_64/multiarch/memmove-shlib-compat.h [new file with mode: 0644]
sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
sysdeps/x86_64/multiarch/memmove-ssse3.S
sysdeps/x86_64/multiarch/rtld-memmove.S [new file with mode: 0644]

diff --git a/sysdeps/x86_64/memmove.S b/sysdeps/x86_64/memmove.S
index 78e8d974d95104c45ca3aa1e1eb81ba5556ae657..19527690eb0e2539f43c38421b3ab4042f045b70 100644
--- a/sysdeps/x86_64/memmove.S
+++ b/sysdeps/x86_64/memmove.S
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <sysdep.h>
-
-#define VEC_SIZE       16
-#define VEC(i)         xmm##i
-#define PREFETCHNT     prefetchnta
-#define VMOVNT         movntdq
-/* Use movups and movaps for smaller code sizes.  */
-#define VMOVU          movups
-#define VMOVA          movaps
-#define MOV_SIZE       3
-#define SECTION(p)             p
 
 #ifdef USE_MULTIARCH
 # if !IS_IN (libc)
 #if !defined USE_MULTIARCH || !IS_IN (libc)
 # define MEMPCPY_SYMBOL(p,s)           __mempcpy
 #endif
-#ifndef MEMMOVE_SYMBOL
-# define MEMMOVE_CHK_SYMBOL(p,s)       p
-# define MEMMOVE_SYMBOL(p,s)           memmove
-#endif
 
-#include "multiarch/memmove-vec-unaligned-erms.S"
+#define MEMMOVE_CHK_SYMBOL(p,s)        p
+#define MEMMOVE_SYMBOL(p,s)    memmove
+
+
+#define DEFAULT_IMPL_V1        "multiarch/memmove-sse2-unaligned-erms.S"
+#define DEFAULT_IMPL_V3        "multiarch/memmove-avx-unaligned-erms.S"
+#define DEFAULT_IMPL_V4        "multiarch/memmove-evex-unaligned-erms.S"
+
+#include "isa-default-impl.h"
+
+weak_alias (__mempcpy, mempcpy)
 
 #ifndef USE_MULTIARCH
 libc_hidden_builtin_def (memmove)
@@ -59,13 +54,10 @@ libc_hidden_def (__mempcpy)
 weak_alias (__mempcpy, mempcpy)
 libc_hidden_builtin_def (mempcpy)
 
+
 # if defined SHARED && IS_IN (libc)
 #  undef memcpy
 #  include <shlib-compat.h>
 versioned_symbol (libc, __memcpy, memcpy, GLIBC_2_14);
-
-#  if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
-compat_symbol (libc, memmove, memcpy, GLIBC_2_2_5);
-#  endif
 # endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index b84acfead2070832c1dcee6a61bbbba1fbc95ed4..7858aa316f26f5bba2f1a5e15f42e70d6f1b55ca 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -101,84 +101,96 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memmove_chk.c.  */
   IFUNC_IMPL (i, name, __memmove_chk,
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memmove_chk_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_chk_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_chk_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memmove_chk_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memmove_chk_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memmove_chk_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memmove_chk_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_chk_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk,
-                             CPU_FEATURE_USABLE (SSSE3),
-                             __memmove_chk_ssse3)
              IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
-                             __memmove_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
-                             __memmove_chk_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
-                             __memmove_chk_erms))
+                             __memmove_chk_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memmove_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_chk_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_chk_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memmove_chk_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memmove_chk_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memmove_chk_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memmove_chk_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __memmove_chk_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk, 1,
+                                    __memmove_chk_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk, 1,
+                                    __memmove_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memmove.c.  */
   IFUNC_IMPL (i, name, memmove,
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memmove_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memmove_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memmove_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memmove_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memmove_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, memmove,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memmove_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (SSSE3),
-                             __memmove_ssse3)
-             IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_erms)
              IFUNC_IMPL_ADD (array, i, memmove, 1,
-                             __memmove_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, memmove, 1,
-                             __memmove_sse2_unaligned_erms))
+                             __memmove_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memmove_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memmove_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memmove_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memmove_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memmove_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memmove_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memmove,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __memmove_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memmove, 1,
+                                    __memmove_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memmove, 1,
+                                    __memmove_sse2_unaligned_erms))
 
   /* Support sysdeps/x86_64/multiarch/memrchr.c.  */
   IFUNC_IMPL (i, name, memrchr,
@@ -832,165 +844,190 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
   IFUNC_IMPL (i, name, __memcpy_chk,
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memcpy_chk_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_chk_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_chk_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memcpy_chk_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memcpy_chk_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memcpy_chk_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memcpy_chk_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_chk_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-                             CPU_FEATURE_USABLE (SSSE3),
-                             __memcpy_chk_ssse3)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
-                             __memcpy_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
-                             __memcpy_chk_sse2_unaligned_erms)
              IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
-                             __memcpy_chk_erms))
+                             __memcpy_chk_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memcpy_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_chk_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_chk_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memcpy_chk_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memcpy_chk_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memcpy_chk_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memcpy_chk_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __memcpy_chk_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk, 1,
+                                    __memcpy_chk_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk, 1,
+                                    __memcpy_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memcpy.c.  */
   IFUNC_IMPL (i, name, memcpy,
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memcpy_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX),
-                             __memcpy_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memcpy_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memcpy_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3),
-                             __memcpy_ssse3)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memcpy_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, memcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __memcpy_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned)
              IFUNC_IMPL_ADD (array, i, memcpy, 1,
-                             __memcpy_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_erms))
+                             __memcpy_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memcpy_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __memcpy_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memcpy_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __memcpy_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memcpy_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memcpy_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __memcpy_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy, 1,
+                                    __memcpy_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy, 1,
+                                    __memcpy_sse2_unaligned_erms))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/mempcpy_chk.c.  */
   IFUNC_IMPL (i, name, __mempcpy_chk,
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __mempcpy_chk_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_chk_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_chk_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __mempcpy_chk_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX),
-                             __mempcpy_chk_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __mempcpy_chk_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __mempcpy_chk_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_chk_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-                             CPU_FEATURE_USABLE (SSSE3),
-                             __mempcpy_chk_ssse3)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
-                             __mempcpy_chk_sse2_unaligned)
              IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
-                             __mempcpy_chk_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
-                             __mempcpy_chk_erms))
+                             __mempcpy_chk_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __mempcpy_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_chk_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_chk_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __mempcpy_chk_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __mempcpy_chk_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __mempcpy_chk_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __mempcpy_chk_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __mempcpy_chk_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk, 1,
+                                    __mempcpy_chk_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk, 1,
+                                    __mempcpy_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/mempcpy.c.  */
   IFUNC_IMPL (i, name, mempcpy,
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __mempcpy_avx512_no_vzeroupper)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX),
-                             __mempcpy_avx_unaligned)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX),
-                             __mempcpy_avx_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __mempcpy_avx_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             (CPU_FEATURE_USABLE (AVX)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __mempcpy_avx_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, mempcpy,
-                             CPU_FEATURE_USABLE (AVX512VL),
-                             __mempcpy_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3),
-                             __mempcpy_ssse3)
-             IFUNC_IMPL_ADD (array, i, mempcpy, 1,
-                             __mempcpy_sse2_unaligned)
              IFUNC_IMPL_ADD (array, i, mempcpy, 1,
-                             __mempcpy_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, mempcpy, 1, __mempcpy_erms))
+                             __mempcpy_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __mempcpy_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX512VL),
+                                    __mempcpy_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __mempcpy_avx_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (AVX),
+                                    __mempcpy_avx_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __mempcpy_avx_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
+                                    (CPU_FEATURE_USABLE (AVX)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __mempcpy_avx_unaligned_erms_rtm)
+             /* By V3 we assume fast aligned copy.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy,
+                                    CPU_FEATURE_USABLE (SSSE3),
+                                    __mempcpy_ssse3)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2 (SSSE3 is too
+                optimized around aligned copy to be better as general
+                purpose memmove).  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy, 1,
+                                    __mempcpy_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy, 1,
+                                    __mempcpy_sse2_unaligned_erms))
 
   /* Support sysdeps/x86_64/multiarch/strncmp.c.  */
   IFUNC_IMPL (i, name, strncmp,
diff --git a/sysdeps/x86_64/multiarch/ifunc-memmove.h b/sysdeps/x86_64/multiarch/ifunc-memmove.h
index fb01fbb301a5726aabf4cb631729302d000042e4..1643d32887c99468e50b1556c5e17f0b3f6d67a2 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memmove.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memmove.h
 #include <init-arch.h>
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+  attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
+  attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms)
   attribute_hidden;
@@ -32,30 +40,27 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_rtm)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms_rtm)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
   attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
   const struct cpu_features *cpu_features = __get_cpu_features ();
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS)
       || CPU_FEATURES_ARCH_P (cpu_features, Prefer_FSRM))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx512_unaligned_erms);
@@ -66,9 +71,10 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx512_no_vzeroupper);
     }
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+  if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                  AVX_Fast_Unaligned_Load, ))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (evex_unaligned_erms);
@@ -84,7 +90,8 @@ IFUNC_SELECTOR (void)
          return OPTIMIZE (avx_unaligned_rtm);
        }
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                      Prefer_No_VZEROUPPER, !))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx_unaligned_erms);
@@ -93,7 +100,11 @@ IFUNC_SELECTOR (void)
        }
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
+      /* Leave this as runtime check.  The SSSE3 is optimized almost
+         exclusively for avoiding unaligned memory access during the
+         copy and by and large is not better than the sse2
+         implementation as a general purpose memmove.  */
       && !CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Copy))
     {
       return OPTIMIZE (ssse3);
diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
index 975ae6c0515b83cbfc69466359d0518e79336445..a14b1556676106d86a953c6ee376efd1a37aa8fb 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
@@ -1,12 +1,23 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (3)
+
 # define VEC_SIZE      32
 # define VEC(i)                ymm##i
 # define VMOVNT                vmovntdq
 # define VMOVU         vmovdqu
 # define VMOVA         vmovdqa
 # define MOV_SIZE      4
+
 # define SECTION(p)            p##.avx
-# define MEMMOVE_SYMBOL(p,s)   p##_avx_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)  p##_avx_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
+
+# if MINIMUM_X86_ISA_LEVEL == 3
+#  include "memmove-shlib-compat.h"
+# endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
index 42d15a142ac3529058b5efcb934c43edc8955fa3..9c090d368bab61390d2e86a80bd9bbae36b8863b 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
@@ -17,8 +17,9 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <isa-level.h>
 
-#if IS_IN (libc)
+#if ISA_SHOULD_BUILD (4)
 
 # include "asm-syntax.h"
 
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
index 0fa7126830af7acbde5a21ea87e2d855df76ca6a..8d1568a7ba90b7e8363a2b8b6fbef97c462af57a 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define VEC_SIZE      64
 # define XMM0          xmm16
 # define XMM1          xmm17
 # define VMOVA         vmovdqa64
 # define VZEROUPPER
 # define MOV_SIZE      6
+
 # define SECTION(p)            p##.evex512
-# define MEMMOVE_SYMBOL(p,s)   p##_avx512_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)  p##_avx512_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
index 88715441feaaccf5151b144d473d04c8f4eca3d9..2373017358a035a8f3781ee25b2e0cd440f7df80 100644
--- a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define VEC_SIZE      32
 # define XMM0          xmm16
 # define XMM1          xmm17
 # define VMOVA         vmovdqa64
 # define VZEROUPPER
 # define MOV_SIZE      6
+
 # define SECTION(p)            p##.evex
-# define MEMMOVE_SYMBOL(p,s)   p##_evex_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)  p##_evex_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
+
+
+# if MINIMUM_X86_ISA_LEVEL == 4
+#  include "memmove-shlib-compat.h"
+# endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-shlib-compat.h b/sysdeps/x86_64/multiarch/memmove-shlib-compat.h
new file mode 100644
index 0000000..c0793d6
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memmove-shlib-compat.h
@@ -0,0 +1,26 @@
+/* Copyright (C) 2016-2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && IS_IN(libc)
+# include <shlib-compat.h>
+# if SHLIB_COMPAT(libc, GLIBC_2_2_5, GLIBC_2_14)
+/* Use __memmove_{isa_level}_unaligned to support overlapping
+   addresses.  */
+compat_symbol (libc, MEMMOVE_SYMBOL (__memmove, unaligned), memcpy,
+              GLIBC_2_2_5);
+# endif
+#endif
diff --git a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
index 09e7c1d6cda5acf7d98381be3037a293a286efba..422a079902b2d0e2589508b9a4e356b14a93db3d 100644
--- a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#if IS_IN (libc)
-# define MEMMOVE_SYMBOL(p,s)   p##_sse2_##s
-#else
-weak_alias (__mempcpy, mempcpy)
-#endif
+#include <isa-level.h>
+
+/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
+   so we need this to build for ISA V2 builds. */
+#if ISA_SHOULD_BUILD (2)
+
+# include <sysdep.h>
+
+# define VEC_SIZE      16
+# define VEC(i)                xmm##i
+# define PREFETCHNT    prefetchnta
+# define VMOVNT                movntdq
+/* Use movups and movaps for smaller code sizes.  */
+# define VMOVU         movups
+# define VMOVA         movaps
+# define MOV_SIZE      3
+
+# define SECTION(p)            p
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)  p##_sse2_##s
+# endif
 
-#include <sysdeps/x86_64/memmove.S>
+# include "multiarch/memmove-vec-unaligned-erms.S"
 
-#if defined SHARED && IS_IN (libc)
-# include <shlib-compat.h>
-# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
-/* Use __memmove_sse2_unaligned to support overlapping addresses.  */
-compat_symbol (libc, __memmove_sse2_unaligned, memcpy, GLIBC_2_2_5);
+# if MINIMUM_X86_ISA_LEVEL <= 2
+#  include "memmove-shlib-compat.h"
 # endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-ssse3.S b/sysdeps/x86_64/multiarch/memmove-ssse3.S
index a88fde4a8f378be3d6af2dfdd467901cccf5c324..57599752c70132eb5e57a564d265dcdc1fefc53a 100644
--- a/sysdeps/x86_64/multiarch/memmove-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memmove-ssse3.S
@@ -18,7 +18,9 @@
    <https://www.gnu.org/licenses/>.  */
 
 
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (2)
 
 # include <sysdep.h>
 # ifndef MEMMOVE
diff --git a/sysdeps/x86_64/multiarch/rtld-memmove.S b/sysdeps/x86_64/multiarch/rtld-memmove.S
new file mode 100644
index 0000000..1f3ad64
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-memmove.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "../memmove.S"