This is the mail archive of the
glibc-cvs@sourceware.org
mailing list for the glibc project.
GNU C Library master sources branch neleai/string-x64 created. glibc-2.21-485-g165308e
- From: neleai at sourceware dot org
- To: glibc-cvs at sourceware dot org
- Date: 26 Jun 2015 20:21:00 -0000
- Subject: GNU C Library master sources branch neleai/string-x64 created. glibc-2.21-485-g165308e
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".
The branch, neleai/string-x64 has been created
at 165308eb2c66542c88d002d63dc68df112f5c818 (commit)
- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=165308eb2c66542c88d002d63dc68df112f5c818
commit 165308eb2c66542c88d002d63dc68df112f5c818
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Fri Jun 26 22:19:29 2015 +0200
Optimize sse4 strspn/strcspn/strpbrk
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 8094162..05d5c9b 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -22,11 +22,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
strchr-sse2-no-bsf memcmp-ssse3 strstr-sse2-unaligned
ifeq (yes,$(config-cflags-sse4))
-sysdep_routines += strcspn-c strpbrk-c strspn-c varshift
-CFLAGS-varshift.c += -msse4
-CFLAGS-strcspn-c.c += -msse4
-CFLAGS-strpbrk-c.c += -msse4
-CFLAGS-strspn-c.c += -msse4
+sysdep_routines += strcspn_sse42 strpbrk_sse42 strspn_sse42
endif
ifeq (yes,$(config-cflags-avx2))
diff --git a/sysdeps/x86_64/multiarch/strcspn-c.c b/sysdeps/x86_64/multiarch/strcspn-c.c
deleted file mode 100644
index 60b2ed7..0000000
--- a/sysdeps/x86_64/multiarch/strcspn-c.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* strcspn with SSE4.2 intrinsics
- Copyright (C) 2009-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <nmmintrin.h>
-#include <string.h>
-#include "varshift.h"
-
-/* We use 0x2:
- _SIDD_SBYTE_OPS
- | _SIDD_CMP_EQUAL_ANY
- | _SIDD_POSITIVE_POLARITY
- | _SIDD_LEAST_SIGNIFICANT
- on pcmpistri to compare xmm/mem128
-
- 0 1 2 3 4 5 6 7 8 9 A B C D E F
- X X X X X X X X X X X X X X X X
-
- against xmm
-
- 0 1 2 3 4 5 6 7 8 9 A B C D E F
- A A A A A A A A A A A A A A A A
-
- to find out if the first 16byte data element has any byte A and
- the offset of the first byte. There are 3 cases:
-
- 1. The first 16byte data element has the byte A at the offset X.
- 2. The first 16byte data element has EOS and doesn't have the byte A.
- 3. The first 16byte data element is valid and doesn't have the byte A.
-
- Here is the table of ECX, CFlag, ZFlag and SFlag for 2 cases:
-
- 1 X 1 0/1 0
- 2 16 0 1 0
- 3 16 0 0 0
-
- We exit from the loop for cases 1 and 2 with jbe which branches
- when either CFlag or ZFlag is 1. If CFlag == 1, ECX has the offset
- X for case 1. */
-
-#ifndef STRCSPN_SSE2
-# define STRCSPN_SSE2 __strcspn_sse2
-# define STRCSPN_SSE42 __strcspn_sse42
-#endif
-
-#ifdef USE_AS_STRPBRK
-# define RETURN(val1, val2) return val1
-#else
-# define RETURN(val1, val2) return val2
-#endif
-
-extern
-#ifdef USE_AS_STRPBRK
-char *
-#else
-size_t
-#endif
-STRCSPN_SSE2 (const char *, const char *);
-
-
-#ifdef USE_AS_STRPBRK
-char *
-#else
-size_t
-#endif
-__attribute__ ((section (".text.sse4.2")))
-STRCSPN_SSE42 (const char *s, const char *a)
-{
- if (*a == 0)
- RETURN (NULL, strlen (s));
-
- const char *aligned;
- __m128i mask;
- int offset = (int) ((size_t) a & 15);
- if (offset != 0)
- {
- /* Load masks. */
- aligned = (const char *) ((size_t) a & -16L);
- __m128i mask0 = _mm_load_si128 ((__m128i *) aligned);
-
- mask = __m128i_shift_right (mask0, offset);
-
- /* Find where the NULL terminator is. */
- int length = _mm_cmpistri (mask, mask, 0x3a);
- if (length == 16 - offset)
- {
- /* There is no NULL terminator. */
- __m128i mask1 = _mm_load_si128 ((__m128i *) (aligned + 16));
- int index = _mm_cmpistri (mask1, mask1, 0x3a);
- length += index;
-
- /* Don't use SSE4.2 if the length of A > 16. */
- if (length > 16)
- return STRCSPN_SSE2 (s, a);
-
- if (index != 0)
- {
- /* Combine mask0 and mask1. We could play games with
- palignr, but frankly this data should be in L1 now
- so do the merge via an unaligned load. */
- mask = _mm_loadu_si128 ((__m128i *) a);
- }
- }
- }
- else
- {
- /* A is aligned. */
- mask = _mm_load_si128 ((__m128i *) a);
-
- /* Find where the NULL terminator is. */
- int length = _mm_cmpistri (mask, mask, 0x3a);
- if (length == 16)
- {
- /* There is no NULL terminator. Don't use SSE4.2 if the length
- of A > 16. */
- if (a[16] != 0)
- return STRCSPN_SSE2 (s, a);
- }
- }
-
- offset = (int) ((size_t) s & 15);
- if (offset != 0)
- {
- /* Check partial string. */
- aligned = (const char *) ((size_t) s & -16L);
- __m128i value = _mm_load_si128 ((__m128i *) aligned);
-
- value = __m128i_shift_right (value, offset);
-
- int length = _mm_cmpistri (mask, value, 0x2);
- /* No need to check ZFlag since ZFlag is always 1. */
- int cflag = _mm_cmpistrc (mask, value, 0x2);
- if (cflag)
- RETURN ((char *) (s + length), length);
- /* Find where the NULL terminator is. */
- int index = _mm_cmpistri (value, value, 0x3a);
- if (index < 16 - offset)
- RETURN (NULL, index);
- aligned += 16;
- }
- else
- aligned = s;
-
- while (1)
- {
- __m128i value = _mm_load_si128 ((__m128i *) aligned);
- int index = _mm_cmpistri (mask, value, 0x2);
- int cflag = _mm_cmpistrc (mask, value, 0x2);
- int zflag = _mm_cmpistrz (mask, value, 0x2);
- if (cflag)
- RETURN ((char *) (aligned + index), (size_t) (aligned + index - s));
- if (zflag)
- RETURN (NULL,
- /* Find where the NULL terminator is. */
- (size_t) (aligned + _mm_cmpistri (value, value, 0x3a) - s));
- aligned += 16;
- }
-}
diff --git a/sysdeps/x86_64/multiarch/strcspn_sse42.S b/sysdeps/x86_64/multiarch/strcspn_sse42.S
new file mode 100644
index 0000000..3e4e659
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcspn_sse42.S
@@ -0,0 +1,3 @@
+#define AS_STRCSPN
+#define __strpbrk_sse42 __strcspn_sse42
+#include "strpbrk_sse42.S"
diff --git a/sysdeps/x86_64/multiarch/strpbrk-c.c b/sysdeps/x86_64/multiarch/strpbrk-c.c
deleted file mode 100644
index bbf5c49..0000000
--- a/sysdeps/x86_64/multiarch/strpbrk-c.c
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Don't define multiple versions for strpbrk in static library since we
- need strpbrk before the initialization happened. */
-#ifdef SHARED
-# define USE_AS_STRPBRK
-# define STRCSPN_SSE2 __strpbrk_sse2
-# define STRCSPN_SSE42 __strpbrk_sse42
-# include "strcspn-c.c"
-#endif
diff --git a/sysdeps/x86_64/multiarch/strpbrk_sse42.S b/sysdeps/x86_64/multiarch/strpbrk_sse42.S
new file mode 100644
index 0000000..512ac19
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strpbrk_sse42.S
@@ -0,0 +1,204 @@
+/* strcspn (str, ss) -- Return the length of the initial segment of STR
+ which contains no characters from SS.
+ Copyright (C) 1994-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#ifdef AS_STRSPN
+# define AS_STRCSPN
+# define MATCH_ALL $18
+#else
+# define MATCH_ALL $2
+#endif
+
+ENTRY(__strpbrk_sse42)
+ movq %rdi, %rax
+ andl $4095, %eax
+ cmp $4032, %eax
+ ja L(cross_page)
+ movq %rsi, %rax
+ andl $4095, %eax
+ cmp $4080, %eax
+ ja L(cross_page)
+ movdqu (%rsi), %xmm4
+ movdqu (%rdi), %xmm1
+ movdqu 16(%rdi), %xmm5
+ movdqu 32(%rdi), %xmm6
+ movdqu 48(%rdi), %xmm7
+
+L(back_from_crosspage):
+ pxor %xmm3, %xmm3
+ pxor %xmm2, %xmm2
+
+ pcmpeqb %xmm4, %xmm2
+ pmovmskb %xmm2, %eax
+ testl %eax, %eax
+ je L(call)
+ pcmpistri MATCH_ALL, %xmm1, %xmm4
+ jc L(rx0)
+ je L(ret0)
+ pcmpistri MATCH_ALL, %xmm5, %xmm4
+ jc L(rx16)
+ je L(ret16)
+ pcmpistri MATCH_ALL, %xmm6, %xmm4
+ jc L(rx32)
+ je L(ret32)
+ pcmpistri MATCH_ALL, %xmm7, %xmm4
+ jc L(rx48)
+ je L(ret48)
+
+ movq %rdi, %rax
+ andq $-16, %rax
+ addq $16, %rax
+ .p2align 4,,10
+ .p2align 3
+L(loop):
+ pcmpistri MATCH_ALL, (%rax), %xmm4
+ lea 16(%rax), %rax
+ jc L(rx_loop)
+ jne L(loop)
+#ifdef AS_STRCSPN
+ movdqa -16(%rax), %xmm1
+ pcmpistri $58, %xmm1, %xmm1
+ lea -16(%rcx, %rax), %rax
+ sub %rdi, %rax
+#else
+ xor %eax, %eax
+#endif
+ ret
+L(rx_loop):
+ lea -16(%rcx, %rax), %rax
+#ifdef AS_STRCSPN
+ sub %rdi, %rax
+#endif
+ ret
+ .p2align 4,,10
+ .p2align 3
+#ifndef AS_STRCSPN
+L(ret0):
+L(ret16):
+L(ret32):
+L(ret48):
+ xorl %eax, %eax
+ ret
+#endif
+L(call):
+#ifdef AS_STRCSPN
+# ifdef AS_STRSPN
+ jmp __strspn_sse2
+# else
+ jmp __strcspn_sse2
+# endif
+#else
+ jmp __strpbrk_sse2
+#endif
+ .p2align 4,,10
+ .p2align 3
+#ifdef AS_STRCSPN
+L(ret0):
+ pcmpistri $58, %xmm1, %xmm1
+L(rx0):
+ lea 0(%rcx), %rax
+#else
+L(rx0):
+ leaq (%rdi,%rcx), %rax
+#endif
+ ret
+#ifdef AS_STRCSPN
+L(ret16):
+ pcmpistri $58, %xmm5, %xmm5
+L(rx16):
+ lea 16(%rcx), %rax
+#else
+L(rx16):
+ leaq 16(%rdi,%rcx), %rax
+#endif
+ ret
+#ifdef AS_STRCSPN
+L(ret32):
+ pcmpistri $58, %xmm6, %xmm6
+L(rx32):
+ lea 32(%rcx), %rax
+#else
+L(rx32):
+ leaq 32(%rdi,%rcx), %rax
+#endif
+ ret
+#ifdef AS_STRCSPN
+L(ret48):
+ pcmpistri $58, %xmm7, %xmm7
+L(rx48):
+ lea 48(%rcx), %rax
+#else
+L(rx48):
+ leaq 48(%rdi,%rcx), %rax
+#endif
+ ret
+
+ .p2align 4,,10
+ .p2align 3
+L(cross_page):
+ movzbl (%rdi), %ecx
+ xorl %eax, %eax
+ leaq -80(%rsp), %r8
+ testb %cl, %cl
+ je L(sloop_end)
+ leaq -80(%rsp), %r8
+ xorl %edx, %edx
+ xorl %eax, %eax
+ .p2align 4,,10
+ .p2align 3
+L(sloop):
+ movb %cl, (%r8,%rdx)
+ movzbl 1(%rdi,%rdx), %ecx
+ addl $1, %eax
+ testb %cl, %cl
+ je L(sloop_end)
+ addq $1, %rdx
+ cmpl $64, %eax
+ jne L(sloop)
+L(sloop_end):
+ movzbl (%rsi), %ecx
+ cltq
+ movb $0, -80(%rsp,%rax)
+ movdqu (%r8), %xmm1
+ movdqu 16(%r8), %xmm5
+ movdqu 32(%r8), %xmm6
+ movdqu 48(%r8), %xmm7
+
+ xorl %eax, %eax
+ testb %cl, %cl
+ je L(aloop_end)
+ xorl %edx, %edx
+ .p2align 4,,10
+ .p2align 3
+L(aloop):
+ movb %cl, (%r8,%rdx)
+ movzbl 1(%rsi,%rdx), %ecx
+ addl $1, %eax
+ testb %cl, %cl
+ je L(aloop_end)
+ addq $1, %rdx
+ cmpl $16, %eax
+ jne L(aloop)
+L(aloop_end):
+ cltq
+ movb $0, -80(%rsp,%rax)
+ movdqu (%r8), %xmm4
+ jmp L(back_from_crosspage)
+END(__strpbrk_sse42)
diff --git a/sysdeps/x86_64/multiarch/strspn-c.c b/sysdeps/x86_64/multiarch/strspn-c.c
deleted file mode 100644
index 6b0c80a..0000000
--- a/sysdeps/x86_64/multiarch/strspn-c.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/* strspn with SSE4.2 intrinsics
- Copyright (C) 2009-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <nmmintrin.h>
-#include <string.h>
-#include "varshift.h"
-
-/* We use 0x12:
- _SIDD_SBYTE_OPS
- | _SIDD_CMP_EQUAL_ANY
- | _SIDD_NEGATIVE_POLARITY
- | _SIDD_LEAST_SIGNIFICANT
- on pcmpistri to compare xmm/mem128
-
- 0 1 2 3 4 5 6 7 8 9 A B C D E F
- X X X X X X X X X X X X X X X X
-
- against xmm
-
- 0 1 2 3 4 5 6 7 8 9 A B C D E F
- A A A A A A A A A A A A A A A A
-
- to find out if the first 16byte data element has any non-A byte and
- the offset of the first byte. There are 2 cases:
-
- 1. The first 16byte data element has the non-A byte, including
- EOS, at the offset X.
- 2. The first 16byte data element is valid and doesn't have the non-A
- byte.
-
- Here is the table of ECX, CFlag, ZFlag and SFlag for 2 cases:
-
- case ECX CFlag ZFlag SFlag
- 1 X 1 0/1 0
- 2 16 0 0 0
-
- We exit from the loop for case 1. */
-
-extern size_t __strspn_sse2 (const char *, const char *);
-
-
-size_t
-__attribute__ ((section (".text.sse4.2")))
-__strspn_sse42 (const char *s, const char *a)
-{
- if (*a == 0)
- return 0;
-
- const char *aligned;
- __m128i mask;
- int offset = (int) ((size_t) a & 15);
- if (offset != 0)
- {
- /* Load masks. */
- aligned = (const char *) ((size_t) a & -16L);
- __m128i mask0 = _mm_load_si128 ((__m128i *) aligned);
-
- mask = __m128i_shift_right (mask0, offset);
-
- /* Find where the NULL terminator is. */
- int length = _mm_cmpistri (mask, mask, 0x3a);
- if (length == 16 - offset)
- {
- /* There is no NULL terminator. */
- __m128i mask1 = _mm_load_si128 ((__m128i *) (aligned + 16));
- int index = _mm_cmpistri (mask1, mask1, 0x3a);
- length += index;
-
- /* Don't use SSE4.2 if the length of A > 16. */
- if (length > 16)
- return __strspn_sse2 (s, a);
-
- if (index != 0)
- {
- /* Combine mask0 and mask1. We could play games with
- palignr, but frankly this data should be in L1 now
- so do the merge via an unaligned load. */
- mask = _mm_loadu_si128 ((__m128i *) a);
- }
- }
- }
- else
- {
- /* A is aligned. */
- mask = _mm_load_si128 ((__m128i *) a);
-
- /* Find where the NULL terminator is. */
- int length = _mm_cmpistri (mask, mask, 0x3a);
- if (length == 16)
- {
- /* There is no NULL terminator. Don't use SSE4.2 if the length
- of A > 16. */
- if (a[16] != 0)
- return __strspn_sse2 (s, a);
- }
- }
-
- offset = (int) ((size_t) s & 15);
- if (offset != 0)
- {
- /* Check partial string. */
- aligned = (const char *) ((size_t) s & -16L);
- __m128i value = _mm_load_si128 ((__m128i *) aligned);
-
- value = __m128i_shift_right (value, offset);
-
- int length = _mm_cmpistri (mask, value, 0x12);
- /* No need to check CFlag since it is always 1. */
- if (length < 16 - offset)
- return length;
- /* Find where the NULL terminator is. */
- int index = _mm_cmpistri (value, value, 0x3a);
- if (index < 16 - offset)
- return length;
- aligned += 16;
- }
- else
- aligned = s;
-
- while (1)
- {
- __m128i value = _mm_load_si128 ((__m128i *) aligned);
- int index = _mm_cmpistri (mask, value, 0x12);
- int cflag = _mm_cmpistrc (mask, value, 0x12);
- if (cflag)
- return (size_t) (aligned + index - s);
- aligned += 16;
- }
-}
diff --git a/sysdeps/x86_64/multiarch/strspn_sse42.S b/sysdeps/x86_64/multiarch/strspn_sse42.S
new file mode 100644
index 0000000..d460167
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strspn_sse42.S
@@ -0,0 +1,3 @@
+#define AS_STRSPN
+#define __strpbrk_sse42 __strspn_sse42
+#include "strpbrk_sse42.S"
diff --git a/sysdeps/x86_64/multiarch/varshift.c b/sysdeps/x86_64/multiarch/varshift.c
deleted file mode 100644
index 0007ef7..0000000
--- a/sysdeps/x86_64/multiarch/varshift.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Helper for variable shifts of SSE registers.
- Copyright (C) 2010-2015 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include "varshift.h"
-
-const int8_t ___m128i_shift_right[31] attribute_hidden =
- {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
- };
diff --git a/sysdeps/x86_64/multiarch/varshift.h b/sysdeps/x86_64/multiarch/varshift.h
deleted file mode 100644
index 30ace3d..0000000
--- a/sysdeps/x86_64/multiarch/varshift.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Helper for variable shifts of SSE registers.
- Copyright (C) 2010-2015 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <stdint.h>
-#include <tmmintrin.h>
-
-extern const int8_t ___m128i_shift_right[31] attribute_hidden;
-
-static __inline__ __m128i
-__m128i_shift_right (__m128i value, unsigned long int offset)
-{
- return _mm_shuffle_epi8 (value,
- _mm_loadu_si128 ((__m128i *) (___m128i_shift_right
- + offset)));
-}
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=0b69916d3c02dfab7987e26325a100815217faa1
commit 0b69916d3c02dfab7987e26325a100815217faa1
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Thu Jun 25 10:26:32 2015 +0200
microoptimize strlen and strnlen
diff --git a/sysdeps/x86_64/strlen.S b/sysdeps/x86_64/strlen.S
index c382c8d..3e8beb0 100644
--- a/sysdeps/x86_64/strlen.S
+++ b/sysdeps/x86_64/strlen.S
@@ -1,5 +1,5 @@
/* SSE2 version of strlen.
- Copyright (C) 2012-2015 Free Software Foundation, Inc.
+ Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,222 +18,224 @@
#include <sysdep.h>
-/* Long lived register in strlen(s), strnlen(s, n) are:
-
- %xmm11 - zero
- %rdi - s
- %r10 (s+n) & (~(64-1))
- %r11 s+n
-*/
.text
ENTRY(strlen)
-
-/* Test 64 bytes from %rax for zero. Save result as bitmask in %rdx. */
-#define FIND_ZERO \
- pcmpeqb (%rax), %xmm8; \
- pcmpeqb 16(%rax), %xmm9; \
- pcmpeqb 32(%rax), %xmm10; \
- pcmpeqb 48(%rax), %xmm11; \
- pmovmskb %xmm8, %esi; \
- pmovmskb %xmm9, %edx; \
- pmovmskb %xmm10, %r8d; \
- pmovmskb %xmm11, %ecx; \
- salq $16, %rdx; \
- salq $16, %rcx; \
- orq %rsi, %rdx; \
- orq %r8, %rcx; \
- salq $32, %rcx; \
- orq %rcx, %rdx;
-
#ifdef AS_STRNLEN
-/* Do not read anything when n==0. */
+ mov %rsi, %r8
+ xor %edx, %edx
test %rsi, %rsi
- jne L(n_nonzero)
- xor %rax, %rax
- ret
-L(n_nonzero):
-
-/* Initialize long lived registers. */
-
- add %rdi, %rsi
- mov %rsi, %r10
- and $-64, %r10
- mov %rsi, %r11
+ je L(return_zero)
+ cmp $64, %rsi
+ jae L(dont_set)
+ bts %rsi, %rdx
+L(dont_set):
#endif
-
- pxor %xmm8, %xmm8
- pxor %xmm9, %xmm9
- pxor %xmm10, %xmm10
- pxor %xmm11, %xmm11
- movq %rdi, %rax
- movq %rdi, %rcx
- andq $4095, %rcx
-/* Offsets 4032-4047 will be aligned into 4032 thus fit into page. */
- cmpq $4047, %rcx
-/* We cannot unify this branching as it would be ~6 cycles slower. */
+ pxor %xmm0, %xmm0
+ mov %edi, %ecx
+ and $4095, %ecx
+ cmp $4032, %ecx
ja L(cross_page)
-
+ movdqu (%rdi), %xmm4
+ pcmpeqb %xmm0, %xmm4
+ pmovmskb %xmm4, %ecx
#ifdef AS_STRNLEN
-/* Test if end is among first 64 bytes. */
-# define STRNLEN_PROLOG \
- mov %r11, %rsi; \
- subq %rax, %rsi; \
- andq $-64, %rax; \
- testq $-64, %rsi; \
- je L(strnlen_ret)
+ or %dx, %cx
#else
-# define STRNLEN_PROLOG andq $-64, %rax;
+ test %ecx, %ecx
#endif
-
-/* Ignore bits in mask that come before start of string. */
-#define PROLOG(lab) \
- movq %rdi, %rcx; \
- xorq %rax, %rcx; \
- STRNLEN_PROLOG; \
- sarq %cl, %rdx; \
- test %rdx, %rdx; \
- je L(lab); \
- bsfq %rdx, %rax; \
+ je L(next48_bytes)
+ bsf %ecx, %eax
ret
#ifdef AS_STRNLEN
- andq $-16, %rax
- FIND_ZERO
-#else
- /* Test first 16 bytes unaligned. */
- movdqu (%rax), %xmm12
- pcmpeqb %xmm8, %xmm12
- pmovmskb %xmm12, %edx
- test %edx, %edx
- je L(next48_bytes)
- bsf %edx, %eax /* If eax is zeroed 16bit bsf can be used. */
+L(return_zero):
+ xor %eax, %eax
ret
-
+L(return_noread):
+ add $64, %rax
+ sub %rdi, %rax
+ ret
+#endif
+ .p2align 4
L(next48_bytes):
-/* Same as FIND_ZERO except we do not check first 16 bytes. */
- andq $-16, %rax
- pcmpeqb 16(%rax), %xmm9
- pcmpeqb 32(%rax), %xmm10
- pcmpeqb 48(%rax), %xmm11
- pmovmskb %xmm9, %edx
- pmovmskb %xmm10, %r8d
- pmovmskb %xmm11, %ecx
- salq $16, %rdx
- salq $16, %rcx
- orq %r8, %rcx
+ movdqu 16(%rdi), %xmm1
+ movdqu 32(%rdi), %xmm2
+ movdqu 48(%rdi), %xmm3
+ pcmpeqb %xmm0, %xmm1
+ pcmpeqb %xmm0, %xmm2
+ pcmpeqb %xmm0, %xmm3
+#ifdef AS_STRNLEN
+ pmovmskb %xmm1, %ecx
+ sal $16, %ecx
+ or %rcx, %rdx
+#else
+ pmovmskb %xmm1, %edx
+ sal $16, %edx
+#endif
+ pmovmskb %xmm2, %esi
+ pmovmskb %xmm3, %ecx
+ sal $16, %ecx
+ or %esi, %ecx
salq $32, %rcx
orq %rcx, %rdx
-#endif
-
- /* When no zero byte is found xmm9-11 are zero so we do not have to
- zero them. */
- PROLOG(loop)
+ je L(loop_init)
+ bsfq %rdx, %rax
+ ret
.p2align 4
L(cross_page):
- andq $-64, %rax
- FIND_ZERO
- PROLOG(loop_init)
+ movq %rdi, %rax
+ pxor %xmm1, %xmm1
+ pxor %xmm2, %xmm2
+ pxor %xmm3, %xmm3
#ifdef AS_STRNLEN
-/* We must do this check to correctly handle strnlen (s, -1). */
-L(strnlen_ret):
- bts %rsi, %rdx
+ mov %rdx, %r9
+#endif
+ andq $-64, %rax
+ pcmpeqb (%rax), %xmm0
+ pcmpeqb 16(%rax), %xmm1
+ pcmpeqb 32(%rax), %xmm2
+ pcmpeqb 48(%rax), %xmm3
+ pmovmskb %xmm0, %esi
+ pxor %xmm0, %xmm0
+ pmovmskb %xmm1, %edx
+ pmovmskb %xmm2, %r10d
+ pmovmskb %xmm3, %ecx
+ sal $16, %edx
+ sal $16, %ecx
+ or %esi, %edx
+ or %r10, %rcx
+ salq $32, %rcx
+ orq %rcx, %rdx
+ mov %edi, %ecx
+#ifdef AS_STRNLEN
+ salq %cl, %r9
+ or %r9, %rdx
+#endif
sarq %cl, %rdx
test %rdx, %rdx
je L(loop_init)
bsfq %rdx, %rax
ret
-#endif
.p2align 4
L(loop_init):
- pxor %xmm9, %xmm9
- pxor %xmm10, %xmm10
- pxor %xmm11, %xmm11
+ movq %rdi, %rax
+ andq $-64, %rax
#ifdef AS_STRNLEN
+ add %rdi, %r8
+ sub %rax, %r8
+ cmp $64, %r8
+ je L(return_noread)
+#endif
+ pxor %xmm1, %xmm1
+ pxor %xmm2, %xmm2
+#ifdef USE_AVX2
+ vpxor %xmm0, %xmm0, %xmm0
+#endif
.p2align 4
L(loop):
+#ifdef USE_AVX2
+ vmovdqa 64(%rax), %ymm1
+ vpminub 96(%rax), %ymm1, %ymm2
+ vpcmpeqb %ymm0, %ymm2, %ymm2
+ vpmovmskb %ymm2, %edx
+#else
+ movdqa 64(%rax), %xmm5
+ pminub 80(%rax), %xmm5
+ pminub 96(%rax), %xmm5
+ pminub 112(%rax), %xmm5
+ pcmpeqb %xmm0, %xmm5
+ pmovmskb %xmm5, %edx
+#endif
- addq $64, %rax
- cmpq %rax, %r10
- je L(exit_end)
-
- movdqa (%rax), %xmm8
- pminub 16(%rax), %xmm8
- pminub 32(%rax), %xmm8
- pminub 48(%rax), %xmm8
- pcmpeqb %xmm11, %xmm8
- pmovmskb %xmm8, %edx
+#ifdef AS_STRNLEN
+ sub $64, %r8
testl %edx, %edx
- jne L(exit)
- jmp L(loop)
-
- .p2align 4
-L(exit_end):
- cmp %rax, %r11
- je L(first) /* Do not read when end is at page boundary. */
- pxor %xmm8, %xmm8
- FIND_ZERO
-
-L(first):
- bts %r11, %rdx
- bsfq %rdx, %rdx
- addq %rdx, %rax
- subq %rdi, %rax
- ret
-
- .p2align 4
-L(exit):
- pxor %xmm8, %xmm8
- FIND_ZERO
-
- bsfq %rdx, %rdx
- addq %rdx, %rax
- subq %rdi, %rax
- ret
-
+ jne L(exit64)
+ cmp $64, %r8
+ jbe L(exit64_zero)
#else
-
- /* Main loop. Unrolled twice to improve L2 cache performance on core2. */
- .p2align 4
-L(loop):
-
- movdqa 64(%rax), %xmm8
- pminub 80(%rax), %xmm8
- pminub 96(%rax), %xmm8
- pminub 112(%rax), %xmm8
- pcmpeqb %xmm11, %xmm8
- pmovmskb %xmm8, %edx
testl %edx, %edx
jne L(exit64)
+#endif
subq $-128, %rax
-
- movdqa (%rax), %xmm8
- pminub 16(%rax), %xmm8
- pminub 32(%rax), %xmm8
- pminub 48(%rax), %xmm8
- pcmpeqb %xmm11, %xmm8
- pmovmskb %xmm8, %edx
+#ifdef USE_AVX2
+ vmovdqa (%rax), %ymm1
+ vpminub 32(%rax), %ymm1, %ymm2
+ vpcmpeqb %ymm0, %ymm2, %ymm2
+ vpmovmskb %ymm2, %edx
+#else
+ movdqa (%rax), %xmm5
+ pminub 16(%rax), %xmm5
+ pminub 32(%rax), %xmm5
+ pminub 48(%rax), %xmm5
+ pcmpeqb %xmm0, %xmm5
+ pmovmskb %xmm5, %edx
+#endif
+#ifdef AS_STRNLEN
+ sub $64, %r8
testl %edx, %edx
jne L(exit0)
+ cmp $64, %r8
+ jbe L(exit0_zero)
+#else
+ testl %edx, %edx
+ jne L(exit0)
+#endif
jmp L(loop)
+#ifdef AS_STRNLEN
+ .p2align 4
+L(exit64_zero):
+ addq $64, %rax
+L(exit0_zero):
+ add %r8, %rax
+ sub %rdi, %rax
+ ret
+#endif
.p2align 4
+
+
L(exit64):
addq $64, %rax
L(exit0):
- pxor %xmm8, %xmm8
- FIND_ZERO
-
+#ifdef USE_AVX2
+ sal $32, %rdx
+#else
+ sal $48, %rdx
+#endif
+#ifdef AS_STRNLEN
+ cmp $64, %r8
+ jae L(dont_set2)
+ bts %r8, %rdx
+ L(dont_set2):
+#endif
+#ifdef USE_AVX2
+ subq %rdi, %rax
+ vpcmpeqb %ymm0, %ymm1, %ymm1
+ vpmovmskb %ymm1, %ecx
+ vzeroupper
+ or %rcx, %rdx
+#else
+ pcmpeqb (%rax), %xmm0
+ pcmpeqb 16(%rax), %xmm1
+ pcmpeqb 32(%rax), %xmm2
+ subq %rdi, %rax
+ pmovmskb %xmm0, %esi
+ pmovmskb %xmm1, %ecx
+ pmovmskb %xmm2, %r8d
+ sal $16, %ecx
+ or %esi, %ecx
+ salq $32, %r8
+ orq %r8, %rcx
+ orq %rcx, %rdx
+#endif
bsfq %rdx, %rdx
addq %rdx, %rax
- subq %rdi, %rax
ret
-
-#endif
-
END(strlen)
libc_hidden_builtin_def (strlen)
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=b154f1ffacd7734ab4b4e75c79812e40f339f902
commit b154f1ffacd7734ab4b4e75c79812e40f339f902
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Fri Jun 19 17:36:06 2015 +0200
Optimize strcmp more and add strncmp, strcasecmp, strncasecmp implementations.
diff --git a/sysdeps/x86_64/locale-defines.sym b/sysdeps/x86_64/locale-defines.sym
index aebff9a..804debb 100644
--- a/sysdeps/x86_64/locale-defines.sym
+++ b/sysdeps/x86_64/locale-defines.sym
@@ -8,4 +8,5 @@ LOCALE_T___LOCALES offsetof (struct __locale_struct, __locales)
LC_CTYPE
_NL_CTYPE_NONASCII_CASE
LOCALE_DATA_VALUES offsetof (struct __locale_data, values)
+LOCALE_TOLOWER offsetof (struct __locale_struct, __ctype_tolower)
SIZEOF_VALUES sizeof (((struct __locale_data *) 0)->values[0])
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 679db2a..8094162 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -7,12 +7,13 @@ endif
ifeq ($(subdir),string)
sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
- strcmp-sse2-unaligned strncmp-ssse3 \
+ strcmp-sse2-unaligned strncmp-sse2-unaligned strncmp-ssse3 \
memcpy-ssse3 \
memcpy-sse2-unaligned mempcpy-ssse3 \
memmove-ssse3 memcpy-ssse3-back mempcpy-ssse3-back \
memmove-avx-unaligned memcpy-avx-unaligned mempcpy-avx-unaligned \
memmove-ssse3-back strcasecmp_l-ssse3 \
+ strcasecmp_l-sse2-unaligned strncase_l-sse2-unaligned \
strncase_l-ssse3 strcat-ssse3 strncat-ssse3\
strcpy-ssse3 strncpy-ssse3 stpcpy-ssse3 stpncpy-ssse3 \
strcpy-sse2-unaligned strncpy-sse2-unaligned \
@@ -29,7 +30,7 @@ CFLAGS-strspn-c.c += -msse4
endif
ifeq (yes,$(config-cflags-avx2))
-sysdep_routines += memset-avx2 strcpy-avx2 stpcpy-avx2 memcmp-avx2
+sysdep_routines += memset-avx2 strcpy-avx2 stpcpy-avx2 memcmp-avx2 strcmp-avx2 strncmp-avx2 strcasecmp_l-avx2 strncase_l-avx2
endif
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index b3dbe65..8c71030 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -94,20 +94,18 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strcasecmp_l.S. */
IFUNC_IMPL (i, name, strcasecmp,
- IFUNC_IMPL_ADD (array, i, strcasecmp, HAS_AVX,
- __strcasecmp_avx)
- IFUNC_IMPL_ADD (array, i, strcasecmp, HAS_SSE4_2,
- __strcasecmp_sse42)
+ IFUNC_IMPL_ADD (array, i, strcasecmp, 1,
+ __strcasecmp_sse2_unaligned)
+ IFUNC_IMPL_ADD (array, i, strcasecmp, HAS_AVX2,
+ __strcasecmp_avx2)
IFUNC_IMPL_ADD (array, i, strcasecmp, HAS_SSSE3,
__strcasecmp_ssse3)
IFUNC_IMPL_ADD (array, i, strcasecmp, 1, __strcasecmp_sse2))
/* Support sysdeps/x86_64/multiarch/strcasecmp_l.S. */
IFUNC_IMPL (i, name, strcasecmp_l,
- IFUNC_IMPL_ADD (array, i, strcasecmp_l, HAS_AVX,
- __strcasecmp_l_avx)
- IFUNC_IMPL_ADD (array, i, strcasecmp_l, HAS_SSE4_2,
- __strcasecmp_l_sse42)
+ IFUNC_IMPL_ADD (array, i, strcasecmp_l, 1,
+ __strcasecmp_sse2_unaligned_l)
IFUNC_IMPL_ADD (array, i, strcasecmp_l, HAS_SSSE3,
__strcasecmp_l_ssse3)
IFUNC_IMPL_ADD (array, i, strcasecmp_l, 1,
@@ -130,7 +128,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strcmp.S. */
IFUNC_IMPL (i, name, strcmp,
- IFUNC_IMPL_ADD (array, i, strcmp, HAS_SSE4_2, __strcmp_sse42)
+ IFUNC_IMPL_ADD (array, i, strcmp, HAS_AVX2, __strcmp_avx2)
IFUNC_IMPL_ADD (array, i, strcmp, HAS_SSSE3, __strcmp_ssse3)
IFUNC_IMPL_ADD (array, i, strcmp, 1, __strcmp_sse2_unaligned)
IFUNC_IMPL_ADD (array, i, strcmp, 1, __strcmp_sse2))
@@ -150,10 +148,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strncase_l.S. */
IFUNC_IMPL (i, name, strncasecmp,
- IFUNC_IMPL_ADD (array, i, strncasecmp, HAS_AVX,
- __strncasecmp_avx)
- IFUNC_IMPL_ADD (array, i, strncasecmp, HAS_SSE4_2,
- __strncasecmp_sse42)
+ IFUNC_IMPL_ADD (array, i, strncasecmp, HAS_AVX2,
+ __strncasecmp_avx2)
+ IFUNC_IMPL_ADD (array, i, strncasecmp, 1,
+ __strncasecmp_sse2_unaligned)
IFUNC_IMPL_ADD (array, i, strncasecmp, HAS_SSSE3,
__strncasecmp_ssse3)
IFUNC_IMPL_ADD (array, i, strncasecmp, 1,
@@ -161,10 +159,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strncase_l.S. */
IFUNC_IMPL (i, name, strncasecmp_l,
- IFUNC_IMPL_ADD (array, i, strncasecmp_l, HAS_AVX,
- __strncasecmp_l_avx)
- IFUNC_IMPL_ADD (array, i, strncasecmp_l, HAS_SSE4_2,
- __strncasecmp_l_sse42)
+ IFUNC_IMPL_ADD (array, i, strncasecmp_l, HAS_AVX2,
+ __strncasecmp_avx2_l)
+ IFUNC_IMPL_ADD (array, i, strncasecmp_l, 1,
+ __strncasecmp_sse2_unaligned_l)
IFUNC_IMPL_ADD (array, i, strncasecmp_l, HAS_SSSE3,
__strncasecmp_l_ssse3)
IFUNC_IMPL_ADD (array, i, strncasecmp_l, 1,
@@ -261,8 +259,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strncmp.S. */
IFUNC_IMPL (i, name, strncmp,
- IFUNC_IMPL_ADD (array, i, strncmp, HAS_SSE4_2,
- __strncmp_sse42)
+ IFUNC_IMPL_ADD (array, i, strncmp, 1, __strncmp_sse2_unaligned)
+ IFUNC_IMPL_ADD (array, i, strncmp, HAS_AVX2, __strncmp_avx2)
+
IFUNC_IMPL_ADD (array, i, strncmp, HAS_SSSE3,
__strncmp_ssse3)
IFUNC_IMPL_ADD (array, i, strncmp, 1, __strncmp_sse2))
diff --git a/sysdeps/x86_64/multiarch/strcasecmp_l-avx2.S b/sysdeps/x86_64/multiarch/strcasecmp_l-avx2.S
new file mode 100644
index 0000000..d10379f
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcasecmp_l-avx2.S
@@ -0,0 +1,5 @@
+#define AS_STRCASECMP
+#define USE_AVX2
+#define __strcasecmp_sse2_unaligned __strcasecmp_avx2
+#define STRCMP __strcasecmp_avx2_l
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strcasecmp_l-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strcasecmp_l-sse2-unaligned.S
new file mode 100644
index 0000000..e2ed03f
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcasecmp_l-sse2-unaligned.S
@@ -0,0 +1,3 @@
+#define AS_STRCASECMP
+#define STRCMP __strcasecmp_sse2_unaligned_l
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strcmp-avx2.S b/sysdeps/x86_64/multiarch/strcmp-avx2.S
new file mode 100644
index 0000000..606df63
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcmp-avx2.S
@@ -0,0 +1,3 @@
+#define USE_AVX2
+#define STRCMP __strcmp_avx2
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strcmp-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strcmp-sse2-unaligned.S
index 20b65fa..ef67fb0 100644
--- a/sysdeps/x86_64/multiarch/strcmp-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/strcmp-sse2-unaligned.S
@@ -18,29 +18,127 @@
#include "sysdep.h"
-ENTRY ( __strcmp_sse2_unaligned)
- movl %edi, %eax
- xorl %edx, %edx
+#ifndef STRCMP
+# define STRCMP __strcmp_sse2_unaligned
+#endif
+
+#ifdef AS_STRCASECMP
+# include "locale-defines.h"
+
+# ifdef AS_STRNCMP
+ENTRY (__strncasecmp_sse2_unaligned)
+ movq __libc_tsd_LOCALE@gottpoff(%rip), %rax
+ mov %fs:(%rax), %rcx
+ // XXX 5 byte should be before the function
+ /* 5-byte NOP. */
+ .byte 0x0f,0x1f,0x44,0x00,0x00
+
+END (__strncasecmp_sse2_unaligned)
+
+ENTRY (STRCMP)
+ test %rdx, %rdx
+ je L(ret_zero)
+ mov LOCALE_TOLOWER(%rcx), %r11
+# else
+ENTRY (__strcasecmp_sse2_unaligned)
+ movq __libc_tsd_LOCALE@gottpoff(%rip), %rax
+ mov %fs:(%rax), %rdx
+ // XXX 5 byte should be before the function
+ /* 5-byte NOP. */
+ .byte 0x0f,0x1f,0x44,0x00,0x00
+
+END (__strcasecmp_sse2_unaligned)
+
+ENTRY (STRCMP)
+ mov LOCALE_TOLOWER(%rdx), %r11
+# endif
+ movzbl (%rdi), %eax
+ movzbl (%rsi), %ecx
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%rcx,4), %eax
+ je L(next)
+L(return):
+ ret
+L(next):
+ test %ecx, %ecx
+ je L(return)
+ leaq 1(%rsi), %rsi
+ leaq 1(%rdi), %rdi
+#ifdef AS_STRNCMP
+ sub $1, %rdx
+#endif
+
+#else
+ENTRY (STRCMP)
+#endif
+
+#ifdef AS_STRNCMP
+ lea -1(%rdx), %r10
+ test %rdx, %rdx
+ je L(ret_zero)
+L(back_to_start):
+ xor %rdx, %rdx
+#endif
+
pxor %xmm7, %xmm7
- orl %esi, %eax
+ movl %esi, %eax
+ andl $4095, %eax
+ cmpl $4032, %eax
+ jg L(cross_page)
+
+ movl %edi, %eax
andl $4095, %eax
cmpl $4032, %eax
jg L(cross_page)
+#ifdef AS_STRNCMP
+ cmp $64, %r10
+ jae L(dont_set_mask)
+ bts %r10, %rdx
+L(dont_set_mask):
+#endif
+
movdqu (%rdi), %xmm1
movdqu (%rsi), %xmm0
pcmpeqb %xmm1, %xmm0
pminub %xmm1, %xmm0
- pxor %xmm1, %xmm1
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- testq %rax, %rax
+ pcmpeqb %xmm7, %xmm0
+ pmovmskb %xmm0, %ecx
+#ifdef AS_STRNCMP
+ or %dx, %cx
+#else
+ test %ecx, %ecx
+#endif
je L(next_48_bytes)
-L(return):
- bsfq %rax, %rdx
+#ifdef AS_STRCASECMP
+L(caseloop1):
+ bsf %ecx, %r9d
+ movzbl (%rdi,%r9), %eax
+ movzbl (%rsi,%r9), %r8d
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%r8,4), %eax
+ jne L(return)
+ test %r8d, %r8d
+ je L(return)
+# ifdef AS_STRNCMP
+ cmp %r9, %r10
+ je L(return)
+# endif
+ leaq -1(%rcx), %rax
+ andq %rax, %rcx
+ je L(next_48_bytes)
+ jmp L(caseloop1)
+#else
+ bsf %ecx, %edx
movzbl (%rdi, %rdx), %eax
movzbl (%rsi, %rdx), %edx
subl %edx, %eax
ret
+#endif
+#ifdef AS_STRNCMP
+ L(ret_zero):
+ xor %eax, %eax
+ ret
+#endif
.p2align 4
L(next_48_bytes):
@@ -50,49 +148,108 @@ L(next_48_bytes):
pcmpeqb %xmm6, %xmm3
movdqu 32(%rsi), %xmm2
pminub %xmm6, %xmm3
- pcmpeqb %xmm1, %xmm3
+ pcmpeqb %xmm7, %xmm3
movdqu 48(%rdi), %xmm4
pcmpeqb %xmm5, %xmm2
- pmovmskb %xmm3, %edx
movdqu 48(%rsi), %xmm0
pminub %xmm5, %xmm2
- pcmpeqb %xmm1, %xmm2
+ pcmpeqb %xmm7, %xmm2
pcmpeqb %xmm4, %xmm0
- pmovmskb %xmm2, %eax
- salq $16, %rdx
- pminub %xmm4, %xmm0
- pcmpeqb %xmm1, %xmm0
+ pmovmskb %xmm2, %eax
salq $32, %rax
+#ifdef AS_STRNCMP
+ or %rdx, %rax
+#endif
+ pmovmskb %xmm3, %edx
+ sal $16, %edx
+ pminub %xmm4, %xmm0
+ pcmpeqb %xmm7, %xmm0
orq %rdx, %rax
- pmovmskb %xmm0, %ecx
- movq %rcx, %rdx
- salq $48, %rdx
- orq %rdx, %rax
+ pmovmskb %xmm0, %ecx
+ salq $48, %rcx
+ orq %rax, %rcx
+ je L(main_loop_header)
+#ifdef AS_STRCASECMP
+L(caseloop2):
+ bsf %rcx, %r9
+ movzbl (%rdi,%r9), %eax
+ movzbl (%rsi,%r9), %r8d
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%r8,4), %eax
jne L(return)
+ test %r8d, %r8d
+ je L(return)
+# ifdef AS_STRNCMP
+ cmp %r9, %r10
+ je L(return)
+# endif
+ leaq -1(%rcx), %rax
+ andq %rax, %rcx
+ je L(main_loop_header)
+ jmp L(caseloop2)
+#else
+ bsf %rcx, %rdx
+ movzbl (%rdi, %rdx), %eax
+ movzbl (%rsi, %rdx), %edx
+ subl %edx, %eax
+ ret
+#endif
+
L(main_loop_header):
+#ifdef USE_AVX2
+ vpxor %xmm7, %xmm7, %xmm7
+#endif
leaq 64(%rdi), %rdx
- movl $4096, %ecx
- pxor %xmm9, %xmm9
andq $-64, %rdx
+# ifdef AS_STRNCMP
+ addq %rdi, %r10
+ subq %rdx, %r10
+# endif
subq %rdi, %rdx
leaq (%rdi, %rdx), %rax
addq %rsi, %rdx
- movq %rdx, %rsi
- andl $4095, %esi
- subq %rsi, %rcx
- shrq $6, %rcx
- movq %rcx, %rsi
- jmp L(loop_start)
+ movl $4096, %esi
+ mov %edx, %ecx
+ andl $4095, %ecx
+ sub %ecx, %esi
+ shr $6, %esi
+#ifdef AS_STRNCMP
+ mov %r10, %r9
+ addq %rdx, %r10
+ shr $6, %r9
+ cmp %r9, %rsi
+ jb L(dont_set_page_bound)
+ mov %r9, %rsi
+L(dont_set_page_bound):
+#endif
.p2align 4
L(loop):
+ add $-1, %rsi
+ ja L(loop_cross_page)
+L(back_to_loop):
+#ifdef USE_AVX2
+ vmovdqa (%rax), %ymm4
+ vmovdqa 32(%rax), %ymm5
+ vmovdqu (%rdx), %ymm0
+ vmovdqu 32(%rdx), %ymm1
+ vpcmpeqb %ymm4, %ymm0, %ymm0
+ vpminub %ymm4, %ymm0, %ymm0
+ vpcmpeqb %ymm5, %ymm1, %ymm1
+ vpminub %ymm5, %ymm1, %ymm1
+ vpminub %ymm0, %ymm1, %ymm2
+ vpcmpeqb %ymm7, %ymm2, %ymm2
addq $64, %rax
addq $64, %rdx
-L(loop_start):
- testq %rsi, %rsi
- leaq -1(%rsi), %rsi
- je L(loop_cross_page)
-L(back_to_loop):
+ vpmovmskb %ymm2, %edi
+ test %edi, %edi
+ je L(loop)
+ shl $32, %rdi
+ vpcmpeqb %ymm7, %ymm0, %ymm0
+ vpmovmskb %ymm0, %ecx
+ or %rdi, %rcx
+ vzeroupper
+#else
movdqu (%rdx), %xmm0
movdqu 16(%rdx), %xmm1
movdqa (%rax), %xmm2
@@ -104,61 +261,99 @@ L(back_to_loop):
movdqu 48(%rdx), %xmm6
pminub %xmm3, %xmm1
movdqa 32(%rax), %xmm2
- pminub %xmm1, %xmm0
movdqa 48(%rax), %xmm3
pcmpeqb %xmm2, %xmm5
pcmpeqb %xmm3, %xmm6
+ addq $64, %rax
pminub %xmm2, %xmm5
pminub %xmm3, %xmm6
- pminub %xmm5, %xmm0
- pminub %xmm6, %xmm0
- pcmpeqb %xmm7, %xmm0
- pmovmskb %xmm0, %ecx
+ addq $64, %rdx
+ pminub %xmm5, %xmm6
+ pminub %xmm1, %xmm6
+ pminub %xmm0, %xmm6
+ pcmpeqb %xmm7, %xmm6
+ pmovmskb %xmm6, %ecx
testl %ecx, %ecx
je L(loop)
- pcmpeqb %xmm7, %xmm5
- movdqu (%rdx), %xmm0
- pcmpeqb %xmm7, %xmm1
- movdqa (%rax), %xmm2
- pcmpeqb %xmm2, %xmm0
- pminub %xmm2, %xmm0
- pcmpeqb %xmm7, %xmm6
pcmpeqb %xmm7, %xmm0
- pmovmskb %xmm1, %ecx
- pmovmskb %xmm5, %r8d
- pmovmskb %xmm0, %edi
- salq $16, %rcx
+ pcmpeqb %xmm7, %xmm1
+ pcmpeqb %xmm7, %xmm5
+ pmovmskb %xmm0, %edi
+ pmovmskb %xmm1, %r9d
+ pmovmskb %xmm5, %r8d
+ salq $48, %rcx
salq $32, %r8
- pmovmskb %xmm6, %esi
orq %r8, %rcx
orq %rdi, %rcx
- salq $48, %rsi
- orq %rsi, %rcx
+ sal $16, %r9d
+ orq %r9, %rcx
+#endif
+#ifdef AS_STRCASECMP
+L(caseloop3):
+ bsf %rcx, %r9
+ movzbl -64(%rax,%r9), %edi
+ movzbl -64(%rdx,%r9), %r8d
+ movl (%r11,%rdi,4), %edi
+ subl (%r11,%r8,4), %edi
+ jne L(return2)
+ test %r8d, %r8d
+ je L(return2)
+ leaq -1(%rcx), %rdi
+ andq %rdi, %rcx
+ je L(loop)
+ jmp L(caseloop3)
+L(return2):
+ mov %rdi, %rax
+ ret
+#else
bsfq %rcx, %rcx
- movzbl (%rax, %rcx), %eax
- movzbl (%rdx, %rcx), %edx
+ movzbl -64(%rax, %rcx), %eax
+ movzbl -64(%rdx, %rcx), %edx
subl %edx, %eax
ret
+#endif
.p2align 4
L(loop_cross_page):
- xor %r10, %r10
- movq %rdx, %r9
- and $63, %r9
- subq %r9, %r10
-
- movdqa (%rdx, %r10), %xmm0
- movdqa 16(%rdx, %r10), %xmm1
- movdqu (%rax, %r10), %xmm2
- movdqu 16(%rax, %r10), %xmm3
+#ifdef AS_STRNCMP
+ mov %r10, %r9
+ sub %rdx, %r9
+ cmp $64, %r9
+ jb L(prepare_back_to_start)
+#endif
+
+ mov %edx, %ecx
+ and $63, %ecx
+ neg %rcx
+#ifdef USE_AVX2
+ vmovdqu (%rax, %rcx), %ymm4
+ vmovdqu 32(%rax, %rcx), %ymm5
+ vmovdqa (%rdx, %rcx), %ymm0
+ vmovdqa 32(%rdx, %rcx), %ymm1
+ vpcmpeqb %ymm4, %ymm0, %ymm0
+ vpminub %ymm4, %ymm0, %ymm0
+ vpcmpeqb %ymm5, %ymm1, %ymm1
+ vpminub %ymm5, %ymm1, %ymm1
+ vpminub %ymm0, %ymm1, %ymm2
+ vpcmpeqb %ymm7, %ymm2, %ymm2
+ vpmovmskb %ymm2, %esi
+ shl $32, %rsi
+ vpcmpeqb %ymm7, %ymm0, %ymm0
+ vpmovmskb %ymm0, %edi
+ or %rsi, %rdi
+#else
+ movdqa (%rdx, %rcx), %xmm0
+ movdqa 16(%rdx, %rcx), %xmm1
+ movdqu (%rax, %rcx), %xmm2
+ movdqu 16(%rax, %rcx), %xmm3
pcmpeqb %xmm2, %xmm0
- movdqa 32(%rdx, %r10), %xmm5
+ movdqa 32(%rdx, %rcx), %xmm5
pcmpeqb %xmm3, %xmm1
pminub %xmm2, %xmm0
- movdqa 48(%rdx, %r10), %xmm6
+ movdqa 48(%rdx, %rcx), %xmm6
pminub %xmm3, %xmm1
- movdqu 32(%rax, %r10), %xmm2
- movdqu 48(%rax, %r10), %xmm3
+ movdqu 32(%rax, %rcx), %xmm2
+ movdqu 48(%rax, %rcx), %xmm3
pcmpeqb %xmm2, %xmm5
pcmpeqb %xmm3, %xmm6
pminub %xmm2, %xmm5
@@ -169,41 +364,143 @@ L(loop_cross_page):
pcmpeqb %xmm7, %xmm5
pcmpeqb %xmm7, %xmm6
- pmovmskb %xmm1, %ecx
- pmovmskb %xmm5, %r8d
- pmovmskb %xmm0, %edi
- salq $16, %rcx
+ pmovmskb %xmm1, %ecx
+ pmovmskb %xmm5, %r8d
+ pmovmskb %xmm0, %edi
+ sal $16, %ecx
salq $32, %r8
- pmovmskb %xmm6, %esi
+ pmovmskb %xmm6, %esi
orq %r8, %rdi
orq %rcx, %rdi
salq $48, %rsi
orq %rsi, %rdi
- movq %r9, %rcx
- movq $63, %rsi
+#endif
+ mov %edx, %ecx
+ mov $63, %esi
+#ifdef AS_STRNCMP
+ shr $6, %r9
+ sub $1, %r9
+ cmp %r9, %rsi
+ jb L(dont_set_bound2)
+ mov %r9, %rsi
+L(dont_set_bound2):
+#endif
shrq %cl, %rdi
test %rdi, %rdi
je L(back_to_loop)
+#ifdef USE_AVX2
+ vzeroupper
+#endif
+
+#ifdef AS_STRCASECMP
+ mov %rdi, %rcx
+L(caseloop4):
+ bsf %rcx, %r9
+ movzbl (%rax,%r9), %edi
+ movzbl (%rdx,%r9), %r8d
+ movl (%r11,%rdi,4), %edi
+ subl (%r11,%r8,4), %edi
+ jne L(return2)
+ test %r8d, %r8d
+ je L(return2)
+ leaq -1(%rcx), %rdi
+ andq %rdi, %rcx
+ je L(back_to_loop)
+ jmp L(caseloop4)
+#else
bsfq %rdi, %rcx
movzbl (%rax, %rcx), %eax
movzbl (%rdx, %rcx), %edx
subl %edx, %eax
ret
+#endif
+#ifdef AS_STRNCMP
+L(prepare_back_to_start):
+# ifdef USE_AVX2
+ vzeroupper
+# endif
+ mov %r9, %r10
+ mov %rdx, %rsi
+ mov %rax, %rdi
+ jmp L(back_to_start)
+#endif
+
+L(cross_page):
+ xorl %edx, %edx
.p2align 4
L(cross_page_loop):
- cmpb %cl, %al
- jne L(different)
- addq $1, %rdx
- cmpq $64, %rdx
- je L(main_loop_header)
-L(cross_page):
movzbl (%rdi, %rdx), %eax
movzbl (%rsi, %rdx), %ecx
- testb %al, %al
- jne L(cross_page_loop)
- xorl %eax, %eax
-L(different):
+#ifdef AS_STRCASECMP
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%rcx,4), %eax
+#else
+ subl %ecx, %eax
+#endif
+ jne L(different)
+#ifdef AS_STRNCMP
+ cmp %rdx, %r10
+ je L(different)
+#endif
+ test %ecx, %ecx
+ je L(different)
+
+ movzbl 1(%rdi, %rdx), %eax
+ movzbl 1(%rsi, %rdx), %ecx
+#ifdef AS_STRCASECMP
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%rcx,4), %eax
+#else
subl %ecx, %eax
+#endif
+ jne L(different)
+#ifdef AS_STRNCMP
+ lea 1(%rdx), %r9
+ cmp %r9, %r10
+ je L(different)
+#endif
+ test %ecx, %ecx
+ je L(different)
+
+ movzbl 2(%rdi, %rdx), %eax
+ movzbl 2(%rsi, %rdx), %ecx
+#ifdef AS_STRCASECMP
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%rcx,4), %eax
+#else
+ subl %ecx, %eax
+#endif
+ jne L(different)
+#ifdef AS_STRNCMP
+ lea 2(%rdx), %r9
+ cmp %r9, %r10
+ je L(different)
+#endif
+ test %ecx, %ecx
+ je L(different)
+
+ movzbl 3(%rdi, %rdx), %eax
+ movzbl 3(%rsi, %rdx), %ecx
+#ifdef AS_STRCASECMP
+ movl (%r11,%rax,4), %eax
+ subl (%r11,%rcx,4), %eax
+#else
+ subl %ecx, %eax
+#endif
+ jne L(different)
+#ifdef AS_STRNCMP
+ lea 3(%rdx), %r9
+ cmp %r9, %r10
+ je L(different)
+#endif
+ test %ecx, %ecx
+ je L(different)
+
+ add $4, %edx
+ cmp $64, %edx
+ je L(main_loop_header)
+ jmp L(cross_page_loop)
+L(different):
ret
-END (__strcmp_sse2_unaligned)
+END (STRCMP)
diff --git a/sysdeps/x86_64/multiarch/strcmp-sse42.S b/sysdeps/x86_64/multiarch/strcmp-sse42.S
deleted file mode 100644
index 4dff0a5..0000000
--- a/sysdeps/x86_64/multiarch/strcmp-sse42.S
+++ /dev/null
@@ -1,1792 +0,0 @@
-/* strcmp with SSE4.2
- Copyright (C) 2009-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-
-/* We use 0x1a:
- _SIDD_SBYTE_OPS
- | _SIDD_CMP_EQUAL_EACH
- | _SIDD_NEGATIVE_POLARITY
- | _SIDD_LEAST_SIGNIFICANT
- on pcmpistri to find out if two 16byte data elements are the same
- and the offset of the first different byte. There are 4 cases:
-
- 1. Both 16byte data elements are valid and identical.
- 2. Both 16byte data elements have EOS and identical.
- 3. Both 16byte data elements are valid and they differ at offset X.
- 4. At least one 16byte data element has EOS at offset X. Two 16byte
- data elements must differ at or before offset X.
-
- Here is the table of ECX, CFlag, ZFlag and SFlag for 4 cases:
-
- case ECX CFlag ZFlag SFlag
- 1 16 0 0 0
- 2 16 0 1 1
- 3 X 1 0 0
- 4 0 <= X 1 0/1 0/1
-
- We exit from the loop for cases 2, 3 and 4 with jbe which branches
- when either CFlag or ZFlag is 1. If CFlag == 0, we return 0 for
- case 2. */
-
- /* Put all SSE 4.2 functions together. */
- .section .text.SECTION,"ax",@progbits
- .align 16
- .type STRCMP_SSE42, @function
- .globl STRCMP_SSE42
- .hidden STRCMP_SSE42
-#ifdef USE_AS_STRCASECMP_L
-ENTRY (GLABEL(__strcasecmp))
- movq __libc_tsd_LOCALE@gottpoff(%rip),%rax
- mov %fs:(%rax),%RDX_LP
-
- // XXX 5 byte should be before the function
- /* 5-byte NOP. */
- .byte 0x0f,0x1f,0x44,0x00,0x00
-END (GLABEL(__strcasecmp))
- /* FALLTHROUGH to strcasecmp_l. */
-#endif
-#ifdef USE_AS_STRNCASECMP_L
-ENTRY (GLABEL(__strncasecmp))
- movq __libc_tsd_LOCALE@gottpoff(%rip),%rax
- mov %fs:(%rax),%RCX_LP
-
- // XXX 5 byte should be before the function
- /* 5-byte NOP. */
- .byte 0x0f,0x1f,0x44,0x00,0x00
-END (GLABEL(__strncasecmp))
- /* FALLTHROUGH to strncasecmp_l. */
-#endif
-
-
-#ifdef USE_AVX
-# define movdqa vmovdqa
-# define movdqu vmovdqu
-# define pmovmskb vpmovmskb
-# define pcmpistri vpcmpistri
-# define psubb vpsubb
-# define pcmpeqb vpcmpeqb
-# define psrldq vpsrldq
-# define pslldq vpslldq
-# define palignr vpalignr
-# define pxor vpxor
-# define D(arg) arg, arg
-#else
-# define D(arg) arg
-#endif
-
-STRCMP_SSE42:
- cfi_startproc
- CALL_MCOUNT
-
-/*
- * This implementation uses SSE to compare up to 16 bytes at a time.
- */
-#ifdef USE_AS_STRCASECMP_L
- /* We have to fall back on the C implementation for locales
- with encodings not matching ASCII for single bytes. */
-# if LOCALE_T___LOCALES != 0 || LC_CTYPE != 0
- mov LOCALE_T___LOCALES+LC_CTYPE*LP_SIZE(%rdx), %RAX_LP
-# else
- mov (%rdx), %RAX_LP
-# endif
- testl $1, LOCALE_DATA_VALUES+_NL_CTYPE_NONASCII_CASE*SIZEOF_VALUES(%rax)
- jne __strcasecmp_l_nonascii
-#endif
-#ifdef USE_AS_STRNCASECMP_L
- /* We have to fall back on the C implementation for locales
- with encodings not matching ASCII for single bytes. */
-# if LOCALE_T___LOCALES != 0 || LC_CTYPE != 0
- mov LOCALE_T___LOCALES+LC_CTYPE*LP_SIZE(%rcx), %RAX_LP
-# else
- mov (%rcx), %RAX_LP
-# endif
- testl $1, LOCALE_DATA_VALUES+_NL_CTYPE_NONASCII_CASE*SIZEOF_VALUES(%rax)
- jne __strncasecmp_l_nonascii
-#endif
-
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- test %rdx, %rdx
- je LABEL(strcmp_exitz)
- cmp $1, %rdx
- je LABEL(Byte0)
- mov %rdx, %r11
-#endif
- mov %esi, %ecx
- mov %edi, %eax
-/* Use 64bit AND here to avoid long NOP padding. */
- and $0x3f, %rcx /* rsi alignment in cache line */
- and $0x3f, %rax /* rdi alignment in cache line */
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
- .section .rodata.cst16,"aM",@progbits,16
- .align 16
-LABEL(belowupper):
- .quad 0x4040404040404040
- .quad 0x4040404040404040
-LABEL(topupper):
-# ifdef USE_AVX
- .quad 0x5a5a5a5a5a5a5a5a
- .quad 0x5a5a5a5a5a5a5a5a
-# else
- .quad 0x5b5b5b5b5b5b5b5b
- .quad 0x5b5b5b5b5b5b5b5b
-# endif
-LABEL(touppermask):
- .quad 0x2020202020202020
- .quad 0x2020202020202020
- .previous
- movdqa LABEL(belowupper)(%rip), %xmm4
-# define UCLOW_reg %xmm4
- movdqa LABEL(topupper)(%rip), %xmm5
-# define UCHIGH_reg %xmm5
- movdqa LABEL(touppermask)(%rip), %xmm6
-# define LCQWORD_reg %xmm6
-#endif
- cmp $0x30, %ecx
- ja LABEL(crosscache)/* rsi: 16-byte load will cross cache line */
- cmp $0x30, %eax
- ja LABEL(crosscache)/* rdi: 16-byte load will cross cache line */
- movdqu (%rdi), %xmm1
- movdqu (%rsi), %xmm2
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
-# ifdef USE_AVX
-# define TOLOWER(reg1, reg2) \
- vpcmpgtb UCLOW_reg, reg1, %xmm7; \
- vpcmpgtb UCHIGH_reg, reg1, %xmm8; \
- vpcmpgtb UCLOW_reg, reg2, %xmm9; \
- vpcmpgtb UCHIGH_reg, reg2, %xmm10; \
- vpandn %xmm7, %xmm8, %xmm8; \
- vpandn %xmm9, %xmm10, %xmm10; \
- vpand LCQWORD_reg, %xmm8, %xmm8; \
- vpand LCQWORD_reg, %xmm10, %xmm10; \
- vpor reg1, %xmm8, reg1; \
- vpor reg2, %xmm10, reg2
-# else
-# define TOLOWER(reg1, reg2) \
- movdqa reg1, %xmm7; \
- movdqa UCHIGH_reg, %xmm8; \
- movdqa reg2, %xmm9; \
- movdqa UCHIGH_reg, %xmm10; \
- pcmpgtb UCLOW_reg, %xmm7; \
- pcmpgtb reg1, %xmm8; \
- pcmpgtb UCLOW_reg, %xmm9; \
- pcmpgtb reg2, %xmm10; \
- pand %xmm8, %xmm7; \
- pand %xmm10, %xmm9; \
- pand LCQWORD_reg, %xmm7; \
- pand LCQWORD_reg, %xmm9; \
- por %xmm7, reg1; \
- por %xmm9, reg2
-# endif
- TOLOWER (%xmm1, %xmm2)
-#else
-# define TOLOWER(reg1, reg2)
-#endif
- pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char checks */
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
- pcmpeqb %xmm2, D(%xmm1) /* compare first 16 bytes for equality */
- psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
- pmovmskb %xmm1, %edx
- sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
- jnz LABEL(less16bytes)/* If not, find different value or null char */
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)/* finish comparison */
-#endif
- add $16, %rsi /* prepare to search next 16 bytes */
- add $16, %rdi /* prepare to search next 16 bytes */
-
- /*
- * Determine source and destination string offsets from 16-byte
- * alignment. Use relative offset difference between the two to
- * determine which case below to use.
- */
- .p2align 4
-LABEL(crosscache):
- and $0xfffffffffffffff0, %rsi /* force %rsi is 16 byte aligned */
- and $0xfffffffffffffff0, %rdi /* force %rdi is 16 byte aligned */
- mov $0xffff, %edx /* for equivalent offset */
- xor %r8d, %r8d
- and $0xf, %ecx /* offset of rsi */
- and $0xf, %eax /* offset of rdi */
- pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char check */
- cmp %eax, %ecx
- je LABEL(ashr_0) /* rsi and rdi relative offset same */
- ja LABEL(bigger)
- mov %edx, %r8d /* r8d is offset flag for exit tail */
- xchg %ecx, %eax
- xchg %rsi, %rdi
-LABEL(bigger):
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- lea 15(%rax), %r9
- sub %rcx, %r9
- lea LABEL(unaligned_table)(%rip), %r10
- movslq (%r10, %r9,4), %r9
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
- lea (%r10, %r9), %r10
- jmp *%r10 /* jump to corresponding case */
-
-/*
- * The following cases will be handled by ashr_0
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(0~15) n(0~15) 15(15+ n-n) ashr_0
- */
- .p2align 4
-LABEL(ashr_0):
-
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpeqb (%rdi), D(%xmm1) /* compare 16 bytes for equality */
-#else
- movdqa (%rdi), %xmm2
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm2, D(%xmm1) /* compare 16 bytes for equality */
-#endif
- psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
- pmovmskb %xmm1, %r9d
- shr %cl, %edx /* adjust 0xffff for offset */
- shr %cl, %r9d /* adjust for 16-byte offset */
- sub %r9d, %edx
- /*
- * edx must be the same with r9d if in left byte (16-rcx) is equal to
- * the start from (16-rax) and no null char was seen.
- */
- jne LABEL(less32bytes) /* mismatch or null char */
- UPDATE_STRNCMP_COUNTER
- mov $16, %rcx
- mov $16, %r9
-
- /*
- * Now both strings are aligned at 16-byte boundary. Loop over strings
- * checking 32-bytes per iteration.
- */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
- .p2align 4
-LABEL(ashr_0_use):
- movdqa (%rdi,%rdx), %xmm0
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- lea 16(%rdx), %rdx
- jbe LABEL(ashr_0_exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- movdqa (%rdi,%rdx), %xmm0
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- lea 16(%rdx), %rdx
- jbe LABEL(ashr_0_exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- jmp LABEL(ashr_0_use)
-
-
- .p2align 4
-LABEL(ashr_0_exit_use):
- jnc LABEL(strcmp_exitz)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub %rcx, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- lea -16(%rdx, %rcx), %rcx
- movzbl (%rdi, %rcx), %eax
- movzbl (%rsi, %rcx), %edx
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
- leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rcx
- movl (%rcx,%rax,4), %eax
- movl (%rcx,%rdx,4), %edx
-#endif
- sub %edx, %eax
- ret
-
-
-
-/*
- * The following cases will be handled by ashr_1
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(15) n -15 0(15 +(n-15) - n) ashr_1
- */
- .p2align 4
-LABEL(ashr_1):
- pslldq $15, D(%xmm2) /* shift first string to align with second */
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2) /* compare 16 bytes for equality */
- psubb %xmm0, D(%xmm2) /* packed sub of comparison results*/
- pmovmskb %xmm2, %r9d
- shr %cl, %edx /* adjust 0xffff for offset */
- shr %cl, %r9d /* adjust for 16-byte offset */
- sub %r9d, %edx
- jnz LABEL(less32bytes) /* mismatch or null char seen */
- movdqa (%rdi), %xmm3
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads*/
- mov $1, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 1(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_1_use):
- add $16, %r10
- jg LABEL(nibble_ashr_1_use)
-
-LABEL(nibble_ashr_1_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_1_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_1_use)
-
- .p2align 4
-LABEL(nibble_ashr_1_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $1, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $14, %ecx
- ja LABEL(nibble_ashr_1_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_2
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(14~15) n -14 1(15 +(n-14) - n) ashr_2
- */
- .p2align 4
-LABEL(ashr_2):
- pslldq $14, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $2, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 2(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_2_use):
- add $16, %r10
- jg LABEL(nibble_ashr_2_use)
-
-LABEL(nibble_ashr_2_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_2_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_2_use)
-
- .p2align 4
-LABEL(nibble_ashr_2_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $2, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $13, %ecx
- ja LABEL(nibble_ashr_2_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_3
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(13~15) n -13 2(15 +(n-13) - n) ashr_3
- */
- .p2align 4
-LABEL(ashr_3):
- pslldq $13, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $3, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 3(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
-LABEL(loop_ashr_3_use):
- add $16, %r10
- jg LABEL(nibble_ashr_3_use)
-
-LABEL(nibble_ashr_3_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_3_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_3_use)
-
- .p2align 4
-LABEL(nibble_ashr_3_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $3, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $12, %ecx
- ja LABEL(nibble_ashr_3_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_4
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(12~15) n -12 3(15 +(n-12) - n) ashr_4
- */
- .p2align 4
-LABEL(ashr_4):
- pslldq $12, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $4, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 4(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_4_use):
- add $16, %r10
- jg LABEL(nibble_ashr_4_use)
-
-LABEL(nibble_ashr_4_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_4_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_4_use)
-
- .p2align 4
-LABEL(nibble_ashr_4_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $4, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $11, %ecx
- ja LABEL(nibble_ashr_4_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_5
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(11~15) n - 11 4(15 +(n-11) - n) ashr_5
- */
- .p2align 4
-LABEL(ashr_5):
- pslldq $11, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $5, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 5(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_5_use):
- add $16, %r10
- jg LABEL(nibble_ashr_5_use)
-
-LABEL(nibble_ashr_5_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $5, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_5_use)
-
- movdqa (%rdi, %rdx), %xmm0
-
- palignr $5, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_5_use)
-
- .p2align 4
-LABEL(nibble_ashr_5_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $5, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $10, %ecx
- ja LABEL(nibble_ashr_5_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_6
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(10~15) n - 10 5(15 +(n-10) - n) ashr_6
- */
- .p2align 4
-LABEL(ashr_6):
- pslldq $10, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $6, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 6(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_6_use):
- add $16, %r10
- jg LABEL(nibble_ashr_6_use)
-
-LABEL(nibble_ashr_6_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_6_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_6_use)
-
- .p2align 4
-LABEL(nibble_ashr_6_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $6, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $9, %ecx
- ja LABEL(nibble_ashr_6_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_7
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(9~15) n - 9 6(15 +(n - 9) - n) ashr_7
- */
- .p2align 4
-LABEL(ashr_7):
- pslldq $9, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $7, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 7(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_7_use):
- add $16, %r10
- jg LABEL(nibble_ashr_7_use)
-
-LABEL(nibble_ashr_7_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_7_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_7_use)
-
- .p2align 4
-LABEL(nibble_ashr_7_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $7, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $8, %ecx
- ja LABEL(nibble_ashr_7_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_8
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(8~15) n - 8 7(15 +(n - 8) - n) ashr_8
- */
- .p2align 4
-LABEL(ashr_8):
- pslldq $8, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $8, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 8(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_8_use):
- add $16, %r10
- jg LABEL(nibble_ashr_8_use)
-
-LABEL(nibble_ashr_8_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_8_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_8_use)
-
- .p2align 4
-LABEL(nibble_ashr_8_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $8, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $7, %ecx
- ja LABEL(nibble_ashr_8_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_9
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(7~15) n - 7 8(15 +(n - 7) - n) ashr_9
- */
- .p2align 4
-LABEL(ashr_9):
- pslldq $7, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $9, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 9(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_9_use):
- add $16, %r10
- jg LABEL(nibble_ashr_9_use)
-
-LABEL(nibble_ashr_9_restart_use):
- movdqa (%rdi, %rdx), %xmm0
-
- palignr $9, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_9_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $9, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_9_use)
-
- .p2align 4
-LABEL(nibble_ashr_9_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $9, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $6, %ecx
- ja LABEL(nibble_ashr_9_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_10
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(6~15) n - 6 9(15 +(n - 6) - n) ashr_10
- */
- .p2align 4
-LABEL(ashr_10):
- pslldq $6, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $10, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 10(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_10_use):
- add $16, %r10
- jg LABEL(nibble_ashr_10_use)
-
-LABEL(nibble_ashr_10_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_10_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_10_use)
-
- .p2align 4
-LABEL(nibble_ashr_10_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $10, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $5, %ecx
- ja LABEL(nibble_ashr_10_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_11
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(5~15) n - 5 10(15 +(n - 5) - n) ashr_11
- */
- .p2align 4
-LABEL(ashr_11):
- pslldq $5, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $11, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 11(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_11_use):
- add $16, %r10
- jg LABEL(nibble_ashr_11_use)
-
-LABEL(nibble_ashr_11_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_11_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_11_use)
-
- .p2align 4
-LABEL(nibble_ashr_11_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $11, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $4, %ecx
- ja LABEL(nibble_ashr_11_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_12
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(4~15) n - 4 11(15 +(n - 4) - n) ashr_12
- */
- .p2align 4
-LABEL(ashr_12):
- pslldq $4, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $12, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 12(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_12_use):
- add $16, %r10
- jg LABEL(nibble_ashr_12_use)
-
-LABEL(nibble_ashr_12_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_12_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_12_use)
-
- .p2align 4
-LABEL(nibble_ashr_12_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $12, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $3, %ecx
- ja LABEL(nibble_ashr_12_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_13
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(3~15) n - 3 12(15 +(n - 3) - n) ashr_13
- */
- .p2align 4
-LABEL(ashr_13):
- pslldq $3, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $13, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 13(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_13_use):
- add $16, %r10
- jg LABEL(nibble_ashr_13_use)
-
-LABEL(nibble_ashr_13_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_13_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_13_use)
-
- .p2align 4
-LABEL(nibble_ashr_13_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $13, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $2, %ecx
- ja LABEL(nibble_ashr_13_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_14
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(2~15) n - 2 13(15 +(n - 2) - n) ashr_14
- */
- .p2align 4
-LABEL(ashr_14):
- pslldq $2, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $14, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 14(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_14_use):
- add $16, %r10
- jg LABEL(nibble_ashr_14_use)
-
-LABEL(nibble_ashr_14_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_14_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_14_use)
-
- .p2align 4
-LABEL(nibble_ashr_14_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $14, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $1, %ecx
- ja LABEL(nibble_ashr_14_restart_use)
-
- jmp LABEL(nibble_ashr_exit_use)
-
-/*
- * The following cases will be handled by ashr_15
- * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
- * n(1~15) n - 1 14(15 +(n - 1) - n) ashr_15
- */
- .p2align 4
-LABEL(ashr_15):
- pslldq $1, D(%xmm2)
- TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, D(%xmm2)
- psubb %xmm0, D(%xmm2)
- pmovmskb %xmm2, %r9d
- shr %cl, %edx
- shr %cl, %r9d
- sub %r9d, %edx
- jnz LABEL(less32bytes)
-
- movdqa (%rdi), %xmm3
-
- UPDATE_STRNCMP_COUNTER
-
- mov $16, %rcx /* index for loads */
- mov $15, %r9d /* byte position left over from less32bytes case */
- /*
- * Setup %r10 value allows us to detect crossing a page boundary.
- * When %r10 goes positive we have crossed a page boundary and
- * need to do a nibble.
- */
- lea 15(%rdi), %r10
- and $0xfff, %r10 /* offset into 4K page */
-
- sub $0x1000, %r10 /* subtract 4K pagesize */
-
- mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
-
- .p2align 4
-LABEL(loop_ashr_15_use):
- add $16, %r10
- jg LABEL(nibble_ashr_15_use)
-
-LABEL(nibble_ashr_15_restart_use):
- movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
-
- add $16, %rdx
- add $16, %r10
- jg LABEL(nibble_ashr_15_use)
-
- movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), D(%xmm0)
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a, (%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- jbe LABEL(exit_use)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub $16, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add $16, %rdx
- jmp LABEL(loop_ashr_15_use)
-
- .p2align 4
-LABEL(nibble_ashr_15_use):
- sub $0x1000, %r10
- movdqa -16(%rdi, %rdx), %xmm0
- psrldq $15, D(%xmm0)
- pcmpistri $0x3a,%xmm0, %xmm0
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- cmp %r11, %rcx
- jae LABEL(nibble_ashr_exit_use)
-#endif
- cmp $0, %ecx
- ja LABEL(nibble_ashr_15_restart_use)
-
-LABEL(nibble_ashr_exit_use):
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpistri $0x1a,(%rsi,%rdx), %xmm0
-#else
- movdqa (%rsi,%rdx), %xmm1
- TOLOWER (%xmm0, %xmm1)
- pcmpistri $0x1a, %xmm1, %xmm0
-#endif
- .p2align 4
-LABEL(exit_use):
- jnc LABEL(strcmp_exitz)
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub %rcx, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- add %rcx, %rdx
- lea -16(%rdi, %r9), %rdi
- movzbl (%rdi, %rdx), %eax
- movzbl (%rsi, %rdx), %edx
- test %r8d, %r8d
- jz LABEL(ret_use)
- xchg %eax, %edx
-LABEL(ret_use):
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
- leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rcx
- movl (%rcx,%rdx,4), %edx
- movl (%rcx,%rax,4), %eax
-#endif
-
- sub %edx, %eax
- ret
-
-LABEL(less32bytes):
- lea (%rdi, %rax), %rdi /* locate the exact address for first operand(rdi) */
- lea (%rsi, %rcx), %rsi /* locate the exact address for second operand(rsi) */
- test %r8d, %r8d
- jz LABEL(ret)
- xchg %rsi, %rdi /* recover original order according to flag(%r8d) */
-
- .p2align 4
-LABEL(ret):
-LABEL(less16bytes):
- bsf %rdx, %rdx /* find and store bit index in %rdx */
-
-#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
- sub %rdx, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzbl (%rsi, %rdx), %ecx
- movzbl (%rdi, %rdx), %eax
-
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
- leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rdx
- movl (%rdx,%rcx,4), %ecx
- movl (%rdx,%rax,4), %eax
-#endif
-
- sub %ecx, %eax
- ret
-
-LABEL(strcmp_exitz):
- xor %eax, %eax
- ret
-
- .p2align 4
- // XXX Same as code above
-LABEL(Byte0):
- movzx (%rsi), %ecx
- movzx (%rdi), %eax
-
-#if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
- leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rdx
- movl (%rdx,%rcx,4), %ecx
- movl (%rdx,%rax,4), %eax
-#endif
-
- sub %ecx, %eax
- ret
- cfi_endproc
- .size STRCMP_SSE42, .-STRCMP_SSE42
-
-#undef UCLOW_reg
-#undef UCHIGH_reg
-#undef LCQWORD_reg
-#undef TOLOWER
-
- /* Put all SSE 4.2 functions together. */
- .section .rodata.SECTION,"a",@progbits
- .p2align 3
-LABEL(unaligned_table):
- .int LABEL(ashr_1) - LABEL(unaligned_table)
- .int LABEL(ashr_2) - LABEL(unaligned_table)
- .int LABEL(ashr_3) - LABEL(unaligned_table)
- .int LABEL(ashr_4) - LABEL(unaligned_table)
- .int LABEL(ashr_5) - LABEL(unaligned_table)
- .int LABEL(ashr_6) - LABEL(unaligned_table)
- .int LABEL(ashr_7) - LABEL(unaligned_table)
- .int LABEL(ashr_8) - LABEL(unaligned_table)
- .int LABEL(ashr_9) - LABEL(unaligned_table)
- .int LABEL(ashr_10) - LABEL(unaligned_table)
- .int LABEL(ashr_11) - LABEL(unaligned_table)
- .int LABEL(ashr_12) - LABEL(unaligned_table)
- .int LABEL(ashr_13) - LABEL(unaligned_table)
- .int LABEL(ashr_14) - LABEL(unaligned_table)
- .int LABEL(ashr_15) - LABEL(unaligned_table)
- .int LABEL(ashr_0) - LABEL(unaligned_table)
-
-#undef LABEL
-#undef GLABEL
-#undef SECTION
-#undef movdqa
-#undef movdqu
-#undef pmovmskb
-#undef pcmpistri
-#undef psubb
-#undef pcmpeqb
-#undef psrldq
-#undef pslldq
-#undef palignr
-#undef pxor
-#undef D
diff --git a/sysdeps/x86_64/multiarch/strcmp.S b/sysdeps/x86_64/multiarch/strcmp.S
index f50f26c..63aa62e 100644
--- a/sysdeps/x86_64/multiarch/strcmp.S
+++ b/sysdeps/x86_64/multiarch/strcmp.S
@@ -31,8 +31,8 @@
test %r9, %r9; \
je LABEL(strcmp_exitz); \
mov %r9, %r11
-
-# define STRCMP_SSE42 __strncmp_sse42
+# define STRCMP_AVX2 __strncmp_avx2
+# define STRCMP_SSE2_UNALIGNED __strncmp_sse2_unaligned
# define STRCMP_SSSE3 __strncmp_ssse3
# define STRCMP_SSE2 __strncmp_sse2
# define __GI_STRCMP __GI_strncmp
@@ -40,9 +40,8 @@
# include "locale-defines.h"
# define UPDATE_STRNCMP_COUNTER
-
-# define STRCMP_AVX __strcasecmp_l_avx
-# define STRCMP_SSE42 __strcasecmp_l_sse42
+# define STRCMP_AVX2 __strcasecmp_avx2_l
+# define STRCMP_SSE2_UNALIGNED __strcasecmp_sse2_unaligned_l
# define STRCMP_SSSE3 __strcasecmp_l_ssse3
# define STRCMP_SSE2 __strcasecmp_l_sse2
# define __GI_STRCMP __GI___strcasecmp_l
@@ -60,8 +59,8 @@
je LABEL(strcmp_exitz); \
mov %r9, %r11
-# define STRCMP_AVX __strncasecmp_l_avx
-# define STRCMP_SSE42 __strncasecmp_l_sse42
+# define STRCMP_AVX2 __strncasecmp_avx2_l
+# define STRCMP_SSE2_UNALIGNED __strncasecmp_sse2_unaligned_l
# define STRCMP_SSSE3 __strncasecmp_l_ssse3
# define STRCMP_SSE2 __strncasecmp_l_sse2
# define __GI_STRCMP __GI___strncasecmp_l
@@ -69,8 +68,9 @@
# define USE_AS_STRCMP
# define UPDATE_STRNCMP_COUNTER
# ifndef STRCMP
+# define STRCMP_AVX2 __strcmp_avx2
+# define STRCMP_SSE2_UNALIGNED __strcmp_sse2_unaligned
# define STRCMP strcmp
-# define STRCMP_SSE42 __strcmp_sse42
# define STRCMP_SSSE3 __strcmp_ssse3
# define STRCMP_SSE2 __strcmp_sse2
# define __GI_STRCMP __GI_strcmp
@@ -89,17 +89,16 @@ ENTRY(STRCMP)
jne 1f
call __init_cpu_features
1:
-#ifdef USE_AS_STRCMP
- leaq __strcmp_sse2_unaligned(%rip), %rax
+# ifdef HAVE_AVX2_SUPPORT
+
+ leaq STRCMP_AVX2(%rip), %rax
+ testl $bit_AVX_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_AVX_Fast_Unaligned_Load(%rip)
+ jnz 3f
+# endif
+ leaq STRCMP_SSE2_UNALIGNED(%rip), %rax
testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
jnz 3f
-#else
- testl $bit_Slow_SSE4_2, __cpu_features+FEATURE_OFFSET+index_Slow_SSE4_2(%rip)
- jnz 2f
- leaq STRCMP_SSE42(%rip), %rax
- testl $bit_SSE4_2, __cpu_features+CPUID_OFFSET+index_SSE4_2(%rip)
- jnz 3f
-#endif
+
2: leaq STRCMP_SSSE3(%rip), %rax
testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
jnz 3f
@@ -115,21 +114,22 @@ ENTRY(__strcasecmp)
jne 1f
call __init_cpu_features
1:
-# ifdef HAVE_AVX_SUPPORT
- leaq __strcasecmp_avx(%rip), %rax
- testl $bit_AVX_Usable, __cpu_features+FEATURE_OFFSET+index_AVX_Usable(%rip)
+# ifdef HAVE_AVX2_SUPPORT
+
+ leaq __strcasecmp_avx2(%rip), %rax
+ testl $bit_AVX_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_AVX_Fast_Unaligned_Load(%rip)
jnz 3f
# endif
- testl $bit_Slow_SSE4_2, __cpu_features+FEATURE_OFFSET+index_Slow_SSE4_2(%rip)
- jnz 2f
- leaq __strcasecmp_sse42(%rip), %rax
- testl $bit_SSE4_2, __cpu_features+CPUID_OFFSET+index_SSE4_2(%rip)
- jnz 3f
+ leaq __strcasecmp_sse2_unaligned(%rip), %rax
+ testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
+ jnz 3f
+
2: leaq __strcasecmp_ssse3(%rip), %rax
testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
jnz 3f
leaq __strcasecmp_sse2(%rip), %rax
3: ret
+
END(__strcasecmp)
weak_alias (__strcasecmp, strcasecmp)
# endif
@@ -141,45 +141,26 @@ ENTRY(__strncasecmp)
jne 1f
call __init_cpu_features
1:
-# ifdef HAVE_AVX_SUPPORT
- leaq __strncasecmp_avx(%rip), %rax
- testl $bit_AVX_Usable, __cpu_features+FEATURE_OFFSET+index_AVX_Usable(%rip)
+# ifdef HAVE_AVX2_SUPPORT
+
+ leaq __strncasecmp_avx2(%rip), %rax
+ testl $bit_AVX_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_AVX_Fast_Unaligned_Load(%rip)
jnz 3f
# endif
- testl $bit_Slow_SSE4_2, __cpu_features+FEATURE_OFFSET+index_Slow_SSE4_2(%rip)
- jnz 2f
- leaq __strncasecmp_sse42(%rip), %rax
- testl $bit_SSE4_2, __cpu_features+CPUID_OFFSET+index_SSE4_2(%rip)
- jnz 3f
+ leaq __strncasecmp_sse2_unaligned(%rip), %rax
+ testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
+ jnz 3f
+
2: leaq __strncasecmp_ssse3(%rip), %rax
testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
jnz 3f
leaq __strncasecmp_sse2(%rip), %rax
3: ret
+
END(__strncasecmp)
weak_alias (__strncasecmp, strncasecmp)
# endif
-# undef LABEL
-# define LABEL(l) .L##l##_sse42
-# define GLABEL(l) l##_sse42
-# define SECTION sse4.2
-# include "strcmp-sse42.S"
-
-
-# ifdef HAVE_AVX_SUPPORT
-# if defined USE_AS_STRCASECMP_L || defined USE_AS_STRNCASECMP_L
-# define LABEL(l) .L##l##_avx
-# define GLABEL(l) l##_avx
-# define USE_AVX 1
-# undef STRCMP_SSE42
-# define STRCMP_SSE42 STRCMP_AVX
-# define SECTION avx
-# include "strcmp-sse42.S"
-# endif
-# endif
-
-
# undef ENTRY
# define ENTRY(name) \
.type STRCMP_SSE2, @function; \
diff --git a/sysdeps/x86_64/multiarch/strncase_l-avx2.S b/sysdeps/x86_64/multiarch/strncase_l-avx2.S
new file mode 100644
index 0000000..809b966
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncase_l-avx2.S
@@ -0,0 +1,6 @@
+#define AS_STRCASECMP
+#define AS_STRNCMP
+#define USE_AVX2
+#define __strncasecmp_sse2_unaligned __strncasecmp_avx2
+#define STRCMP __strncasecmp_avx2_l
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strncase_l-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strncase_l-sse2-unaligned.S
new file mode 100644
index 0000000..a372ed4
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncase_l-sse2-unaligned.S
@@ -0,0 +1,4 @@
+#define AS_STRCASECMP
+#define AS_STRNCMP
+#define STRCMP __strncasecmp_sse2_unaligned_l
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strncmp-avx2.S b/sysdeps/x86_64/multiarch/strncmp-avx2.S
new file mode 100644
index 0000000..2d9a032
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncmp-avx2.S
@@ -0,0 +1,4 @@
+#define USE_AVX2
+#define AS_STRNCMP
+#define STRCMP __strncmp_avx2
+#include "strcmp-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strncmp-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strncmp-sse2-unaligned.S
new file mode 100644
index 0000000..7f9a5fd
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncmp-sse2-unaligned.S
@@ -0,0 +1,3 @@
+#define AS_STRNCMP
+#define STRCMP __strncmp_sse2_unaligned
+#include "strcmp-sse2-unaligned.S"
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=b3ed0e6def9a2fdea4c47469b2d98db33c094025
commit b3ed0e6def9a2fdea4c47469b2d98db33c094025
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Tue Jun 23 07:52:36 2015 +0200
faster memchr
diff --git a/sysdeps/x86_64/memchr.S b/sysdeps/x86_64/memchr.S
index fae85ca..9649b1c 100644
--- a/sysdeps/x86_64/memchr.S
+++ b/sysdeps/x86_64/memchr.S
@@ -1,5 +1,4 @@
-/* Copyright (C) 2011-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
+/* Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,292 +17,134 @@
#include <sysdep.h>
-/* fast SSE2 version with using pmaxub and 64 byte loop */
+/* fast SSE2 version with using 64 byte loop */
.text
ENTRY(memchr)
- movd %rsi, %xmm1
- mov %rdi, %rcx
-
- punpcklbw %xmm1, %xmm1
- test %rdx, %rdx
- jz L(return_null)
- punpcklbw %xmm1, %xmm1
-
- and $63, %rcx
- pshufd $0, %xmm1, %xmm1
-
- cmp $48, %rcx
- ja L(crosscache)
-
- movdqu (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
-
- jnz L(matches_1)
- sub $16, %rdx
- jbe L(return_null)
- add $16, %rdi
- and $15, %rcx
- and $-16, %rdi
- add %rcx, %rdx
- sub $64, %rdx
- jbe L(exit_loop)
- jmp L(loop_prolog)
-
- .p2align 4
-L(crosscache):
- and $15, %rcx
- and $-16, %rdi
- movdqa (%rdi), %xmm0
-
- pcmpeqb %xmm1, %xmm0
-/* Check if there is a match. */
- pmovmskb %xmm0, %eax
-/* Remove the leading bytes. */
- sar %cl, %eax
- test %eax, %eax
- je L(unaligned_no_match)
-/* Check which byte is a match. */
+ movd %esi, %xmm2
+ testq %rdx, %rdx
+ punpcklbw %xmm2, %xmm2
+ punpcklwd %xmm2, %xmm2
+ pshufd $0, %xmm2, %xmm2
+ je L(return_null)
+ movl %edi, %eax
+ andl $4095, %eax
+ cmpl $4032, %eax
+ jg L(cross_page)
+ movdqu (%rdi), %xmm1
+ pcmpeqb %xmm2, %xmm1
+ pmovmskb %xmm1, %eax
+ test %eax, %eax
+ je L(next_48_bytes)
bsf %eax, %eax
-
- sub %rax, %rdx
+ cmpq %rax, %rdx
jbe L(return_null)
- add %rdi, %rax
- add %rcx, %rax
- ret
-
- .p2align 4
-L(unaligned_no_match):
- add %rcx, %rdx
- sub $16, %rdx
+ addq %rdi, %rax
+ ret
+.p2align 4,,10
+.p2align 3
+L(next_48_bytes):
+ movdqu 16(%rdi), %xmm1
+ movdqu 32(%rdi), %xmm3
+ pcmpeqb %xmm2, %xmm1
+ pcmpeqb %xmm2, %xmm3
+ movdqu 48(%rdi), %xmm4
+ pmovmskb %xmm1, %esi
+ pmovmskb %xmm3, %ecx
+ pcmpeqb %xmm2, %xmm4
+ pmovmskb %xmm4, %eax
+ salq $32, %rcx
+ sal $16, %esi
+ orq %rsi, %rcx
+ salq $48, %rax
+ orq %rcx, %rax
+ je L(prepare_loop)
+L(return):
+ bsf %rax, %rax
+ cmpq %rax, %rdx
jbe L(return_null)
- add $16, %rdi
- sub $64, %rdx
- jbe L(exit_loop)
-
- .p2align 4
-L(loop_prolog):
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
+ addq %rdi, %rax
+ ret
- movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
-
- movdqa 48(%rdi), %xmm4
- pcmpeqb %xmm1, %xmm4
- add $64, %rdi
- pmovmskb %xmm4, %eax
- test %eax, %eax
- jnz L(matches0)
-
- test $0x3f, %rdi
- jz L(align64_loop)
-
- sub $64, %rdx
- jbe L(exit_loop)
-
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
-
- movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
-
- movdqa 48(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
-
- add $64, %rdi
- test %eax, %eax
- jnz L(matches0)
-
- mov %rdi, %rcx
- and $-64, %rdi
- and $63, %rcx
- add %rcx, %rdx
-
- .p2align 4
-L(align64_loop):
- sub $64, %rdx
- jbe L(exit_loop)
- movdqa (%rdi), %xmm0
- movdqa 16(%rdi), %xmm2
- movdqa 32(%rdi), %xmm3
- movdqa 48(%rdi), %xmm4
-
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm1, %xmm2
- pcmpeqb %xmm1, %xmm3
- pcmpeqb %xmm1, %xmm4
-
- pmaxub %xmm0, %xmm3
- pmaxub %xmm2, %xmm4
+.p2align 4,,10
+.p2align 3
+L(return_null):
+ xorl %eax, %eax
+ ret
+.p2align 4,,10
+.p2align 4
+L(prepare_loop):
+ movq %rdi, %rcx
+ andq $-64, %rcx
+ subq %rcx, %rdi
+ leaq (%rdx, %rdi), %rsi
+.p2align 4,,10
+.p2align 3
+L(loop):
+ subq $64, %rsi
+ jbe L(return_null)
+
+ movdqa 64(%rcx), %xmm0
+ movdqa 80(%rcx), %xmm1
+ movdqa 96(%rcx), %xmm3
+ movdqa 112(%rcx), %xmm4
+
+ pcmpeqb %xmm2, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ pcmpeqb %xmm2, %xmm3
+ pcmpeqb %xmm2, %xmm4
+
+ pmaxub %xmm0, %xmm1
+ pmaxub %xmm1, %xmm3
pmaxub %xmm3, %xmm4
- pmovmskb %xmm4, %eax
-
- add $64, %rdi
-
- test %eax, %eax
- jz L(align64_loop)
-
- sub $64, %rdi
-
+ addq $64, %rcx
+ pmovmskb %xmm4, %edx
+ testl %edx, %edx
+ je L(loop)
+ pmovmskb %xmm3, %r8d
+ pmovmskb %xmm1, %edi
+ salq $48, %rdx
pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
-
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
-
- pcmpeqb 48(%rdi), %xmm1
- pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
-
- pmovmskb %xmm1, %eax
- bsf %eax, %eax
- lea 48(%rdi, %rax), %rax
- ret
-
- .p2align 4
-L(exit_loop):
- add $32, %rdx
- jle L(exit_loop_32)
-
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
-
- movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32_1)
- sub $16, %rdx
- jle L(return_null)
-
- pcmpeqb 48(%rdi), %xmm1
- pmovmskb %xmm1, %eax
- test %eax, %eax
- jnz L(matches48_1)
- xor %rax, %rax
- ret
-
- .p2align 4
-L(exit_loop_32):
- add $32, %rdx
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches_1)
- sub $16, %rdx
- jbe L(return_null)
-
- pcmpeqb 16(%rdi), %xmm1
- pmovmskb %xmm1, %eax
- test %eax, %eax
- jnz L(matches16_1)
- xor %rax, %rax
- ret
-
- .p2align 4
-L(matches0):
- bsf %eax, %eax
- lea -16(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(matches):
- bsf %eax, %eax
- add %rdi, %rax
- ret
-
- .p2align 4
-L(matches16):
- bsf %eax, %eax
- lea 16(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(matches32):
- bsf %eax, %eax
- lea 32(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(matches_1):
- bsf %eax, %eax
- sub %rax, %rdx
- jbe L(return_null)
- add %rdi, %rax
- ret
-
- .p2align 4
-L(matches16_1):
- bsf %eax, %eax
- sub %rax, %rdx
+ salq $32, %r8
+ sal $16, %edi
+ or %edi, %eax
+ orq %r8, %rax
+ orq %rax, %rdx
+ bsfq %rdx, %rax
+ cmp %rax, %rsi
jbe L(return_null)
- lea 16(%rdi, %rax), %rax
+ addq %rcx, %rax
ret
- .p2align 4
-L(matches32_1):
- bsf %eax, %eax
- sub %rax, %rdx
- jbe L(return_null)
- lea 32(%rdi, %rax), %rax
- ret
-
- .p2align 4
-L(matches48_1):
- bsf %eax, %eax
- sub %rax, %rdx
- jbe L(return_null)
- lea 48(%rdi, %rax), %rax
- ret
-
- .p2align 4
-L(return_null):
- xor %rax, %rax
- ret
+.p2align 4,,10
+.p2align 3
+L(cross_page):
+ movq %rdi, %rsi
+ andq $-64, %rsi
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm2, %xmm1
+ pmovmskb %xmm1, %ecx
+ movdqa 16(%rsi), %xmm1
+ pcmpeqb %xmm2, %xmm1
+ pmovmskb %xmm1, %eax
+ movdqa 32(%rsi), %xmm1
+ pcmpeqb %xmm2, %xmm1
+ sal $16, %eax
+ movdqa %xmm2, %xmm0
+ pcmpeqb 48(%rsi), %xmm0
+ pmovmskb %xmm1, %r8d
+ pmovmskb %xmm0, %r9d
+ salq $32, %r8
+ salq $48, %r9
+ or %ecx, %eax
+ orq %r9, %rax
+ orq %r8, %rax
+ movq %rdi, %rcx
+ subq %rsi, %rcx
+ shrq %cl, %rax
+ testq %rax, %rax
+ jne L(return)
+ jmp L(prepare_loop)
END(memchr)
strong_alias (memchr, __memchr)
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=ee77548855e3df7ad29edfac45ca9bd95115ec19
commit ee77548855e3df7ad29edfac45ca9bd95115ec19
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Thu Jun 18 09:02:22 2015 +0200
unaligned sse2 memcmp
diff --git a/sysdeps/x86_64/memcmp.S b/sysdeps/x86_64/memcmp.S
index f636716..88c0c4a 100644
--- a/sysdeps/x86_64/memcmp.S
+++ b/sysdeps/x86_64/memcmp.S
@@ -19,340 +19,204 @@
#include <sysdep.h>
+#ifndef MEMCMP
+# define MEMCMP memcmp
+#endif
+
.text
-ENTRY (memcmp)
- test %rdx, %rdx
- jz L(finz)
- cmpq $1, %rdx
- jle L(finr1b)
- subq %rdi, %rsi
- movq %rdx, %r10
- cmpq $32, %r10
- jge L(gt32)
- /* Handle small chunks and last block of less than 32 bytes. */
-L(small):
- testq $1, %r10
- jz L(s2b)
- movzbl (%rdi), %eax
- movzbl (%rdi, %rsi), %edx
- subq $1, %r10
- je L(finz1)
- addq $1, %rdi
- subl %edx, %eax
- jnz L(exit)
-L(s2b):
- testq $2, %r10
- jz L(s4b)
- movzwl (%rdi), %eax
- movzwl (%rdi, %rsi), %edx
- subq $2, %r10
- je L(fin2_7)
- addq $2, %rdi
- cmpl %edx, %eax
- jnz L(fin2_7)
-L(s4b):
- testq $4, %r10
- jz L(s8b)
- movl (%rdi), %eax
- movl (%rdi, %rsi), %edx
- subq $4, %r10
- je L(fin2_7)
- addq $4, %rdi
- cmpl %edx, %eax
- jnz L(fin2_7)
-L(s8b):
- testq $8, %r10
- jz L(s16b)
- movq (%rdi), %rax
- movq (%rdi, %rsi), %rdx
- subq $8, %r10
- je L(fin2_7)
- addq $8, %rdi
- cmpq %rdx, %rax
- jnz L(fin2_7)
-L(s16b):
- movdqu (%rdi), %xmm1
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- xorl %eax, %eax
- subl $0xffff, %edx
- jz L(finz)
- bsfl %edx, %ecx
- leaq (%rdi, %rcx), %rcx
- movzbl (%rcx), %eax
- movzbl (%rsi, %rcx), %edx
- jmp L(finz1)
+ENTRY (MEMCMP)
+ testq %rdx, %rdx
+ je L(return_zero)
+#ifdef AS_WMEMCMP
+ shl $2, %rdx
+#endif
+ pxor %xmm4, %xmm4
+ movl %edi, %eax
+ andl $4095, %eax
+ cmpl $4032, %eax
+ ja L(cross_page_start)
+L(handle_end):
+ movl %esi, %eax
+ andl $4095, %eax
+ cmpl $4032, %eax
+ ja L(cross_page_start)
+L(back_header):
+ xor %ecx, %ecx
+ bts %rdx, %rcx
+ sub $1, %rcx
+ movdqu (%rdi), %xmm0
+ movdqu (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm4, %xmm0
+ pmovmskb %xmm0, %eax
+ and %ecx, %eax
+ jne L(different)
+ cmpq $16, %rdx
+ ja L(next)
+ ret
+L(next):
+ pmovmskb %xmm0, %r8d
+ movdqu 16(%rdi), %xmm2
+ movdqu 16(%rsi), %xmm6
+ movdqu 32(%rdi), %xmm1
+ pcmpeqb %xmm6, %xmm2
+ movdqu 32(%rsi), %xmm5
+ pcmpeqb %xmm4, %xmm2
+ pcmpeqb %xmm5, %xmm1
+ movdqu 48(%rdi), %xmm7
+ pmovmskb %xmm2, %eax
+ movdqu 48(%rsi), %xmm3
+ pcmpeqb %xmm4, %xmm1
+ pmovmskb %xmm1, %r9d
+ sal $16, %eax
+ pcmpeqb %xmm3, %xmm7
+ salq $32, %r9
+ pcmpeqb %xmm4, %xmm7
+ orq %r9, %rax
+ orq %r8, %rax
+ pmovmskb %xmm7, %r8d
+ salq $48, %r8
+ orq %r8, %rax
+ movq %rax, %r8
+ andq %rcx, %rax
+ jne L(different)
+ cmpq $64, %rdx
+ jb L(return_zero)
+ movq %r8, %rax
+ testq %rax, %rax
+ jne L(different)
+L(align_loop):
+ leaq 64(%rdi), %rax
+ andq $-64, %rax
+ subq %rdi, %rax
+ subq %rax, %rdx
+ addq %rax, %rdi
+ addq %rax, %rsi
+ cmpq $64, %rdx
+ ja L(loop_start)
+ testq %rdx, %rdx
+ jne L(handle_end)
+ xorl %eax, %eax
+ ret
- .p2align 4,, 4
-L(finr1b):
- movzbl (%rdi), %eax
- movzbl (%rsi), %edx
-L(finz1):
+ .p2align 4
+L(different):
+ bsfq %rax, %rdx
+#ifdef AS_WMEMCMP
+ and $-4, %rdx
+ mov (%rdi,%rdx), %eax
+ mov (%rsi,%rdx), %edx
subl %edx, %eax
-L(exit):
+ jg L(ret1)
+ jl L(ret_neg_1)
ret
-
- .p2align 4,, 4
-L(fin2_7):
- cmpq %rdx, %rax
- jz L(finz)
- movq %rax, %r11
- subq %rdx, %r11
- bsfq %r11, %rcx
- sarq $3, %rcx
- salq $3, %rcx
- sarq %cl, %rax
- movzbl %al, %eax
- sarq %cl, %rdx
- movzbl %dl, %edx
+L(ret1):
+ mov $1, %eax
+ ret
+L(ret_neg_1):
+ mov $-1, %eax
+ ret
+#else
+ movzbl (%rdi,%rdx), %eax
+ movzbl (%rsi,%rdx), %edx
subl %edx, %eax
ret
-
- .p2align 4,, 4
-L(finz):
+#endif
+L(return_zero):
+ xor %eax, %eax
+ ret
+ .p2align 4
+L(loop):
+ subq $64, %rdx
+ addq $64, %rdi
+ addq $64, %rsi
+ cmpq $64, %rdx
+ jbe L(less_64_bytes)
+L(loop_start):
+ movdqu (%rsi), %xmm0
+ movdqu 16(%rsi), %xmm1
+ pcmpeqb (%rdi), %xmm0
+ movdqu 32(%rsi), %xmm2
+ pcmpeqb 16(%rdi), %xmm1
+ movdqu 48(%rsi), %xmm3
+ pcmpeqb 32(%rdi), %xmm2
+ pcmpeqb 48(%rdi), %xmm3
+ pminub %xmm0, %xmm3
+ pminub %xmm1, %xmm3
+ pminub %xmm2, %xmm3
+ pcmpeqb %xmm4, %xmm3
+ pmovmskb %xmm3, %eax
+ testl %eax, %eax
+ je L(loop)
+ shl $48, %rax
+ pcmpeqb %xmm4, %xmm0
+ pcmpeqb %xmm4, %xmm1
+ pcmpeqb %xmm4, %xmm2
+ pmovmskb %xmm0, %r8
+ pmovmskb %xmm1, %rcx
+ pmovmskb %xmm2, %r9
+ shl $16, %ecx
+ shl $32, %r9
+ or %r8, %rax
+ or %r9, %rax
+ or %rcx, %rax
+ jmp L(different)
+
+ .p2align 4
+L(less_64_bytes):
+ testq %rdx, %rdx
+ jne L(handle_end)
xorl %eax, %eax
ret
- /* For blocks bigger than 32 bytes
- 1. Advance one of the addr pointer to be 16B aligned.
- 2. Treat the case of both addr pointers aligned to 16B
- separately to avoid movdqu.
- 3. Handle any blocks of greater than 64 consecutive bytes with
- unrolling to reduce branches.
- 4. At least one addr pointer is 16B aligned, use memory version
- of pcmbeqb.
- */
- .p2align 4,, 4
-L(gt32):
- movq %rdx, %r11
- addq %rdi, %r11
- movq %rdi, %r8
-
- andq $15, %r8
- jz L(16am)
- /* Both pointers may be misaligned. */
- movdqu (%rdi), %xmm1
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- subl $0xffff, %edx
- jnz L(neq)
- neg %r8
- leaq 16(%rdi, %r8), %rdi
-L(16am):
- /* Handle two 16B aligned pointers separately. */
- testq $15, %rsi
- jz L(ATR)
- testq $16, %rdi
- jz L(A32)
- movdqu (%rdi, %rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-L(A32):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
- /* Pre-unroll to be ready for unrolled 64B loop. */
- testq $32, %rdi
- jz L(A64)
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
-L(A64):
- movq %r11, %r10
- andq $-64, %r10
- cmpq %r10, %rdi
- jge L(mt32)
-
-L(A64main):
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %rdi, %r10
- jne L(A64main)
-
-L(mt32):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
-L(A32main):
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqu (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %rdi, %r10
- jne L(A32main)
-L(mt16):
- subq %rdi, %r11
- je L(finz)
- movq %r11, %r10
- jmp L(small)
-
- .p2align 4,, 4
-L(neq):
- bsfl %edx, %ecx
- movzbl (%rdi, %rcx), %eax
- addq %rdi, %rsi
- movzbl (%rsi,%rcx), %edx
- jmp L(finz1)
-
- .p2align 4,, 4
-L(ATR):
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
- testq $16, %rdi
- jz L(ATR32)
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
- cmpq %rdi, %r10
- je L(mt16)
-
-L(ATR32):
- movq %r11, %r10
- andq $-64, %r10
- testq $32, %rdi
- jz L(ATR64)
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
-L(ATR64):
- cmpq %rdi, %r10
- je L(mt32)
-
-L(ATR64main):
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
- cmpq %rdi, %r10
- jne L(ATR64main)
-
- movq %r11, %r10
- andq $-32, %r10
- cmpq %r10, %rdi
- jge L(mt16)
-
-L(ATR32res):
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- movdqa (%rdi,%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
- pmovmskb %xmm0, %edx
- subl $0xffff, %edx
- jnz L(neq)
- addq $16, %rdi
-
- cmpq %r10, %rdi
- jne L(ATR32res)
-
- subq %rdi, %r11
- je L(finz)
- movq %r11, %r10
- jmp L(small)
- /* Align to 16byte to improve instruction fetch. */
- .p2align 4,, 4
-END(memcmp)
+ .p2align 4
+L(cross_page_start):
+ cmp $64, %rdx
+ ja L(back_header)
+
+ .p2align 4
+L(cross_page):
+ test %edx, %edx
+ je L(return_zero)
+#ifdef AS_WMEMCMP
+ mov (%rdi), %eax
+ mov (%rsi), %ecx
+ subl %ecx, %eax
+ jg L(ret1)
+ jl L(ret_neg_1)
+#else
+ movzbl (%rdi), %eax
+ movzbl (%rsi), %ecx
+ subl %ecx, %eax
+ jne L(return)
+ cmp $1, %edx
+ je L(return)
+ movzbl 1(%rdi), %eax
+ movzbl 1(%rsi), %ecx
+ subl %ecx, %eax
+ jne L(return)
+ cmp $2, %edx
+ je L(return)
+ movzbl 2(%rdi), %eax
+ movzbl 2(%rsi), %ecx
+ subl %ecx, %eax
+ jne L(return)
+ cmp $3, %edx
+ je L(return)
+ movzbl 3(%rdi), %eax
+ movzbl 3(%rsi), %ecx
+ subl %ecx, %eax
+ jne L(return)
+#endif
+ sub $4, %edx
+ add $4, %rdi
+ add $4, %rsi
+ jmp L(cross_page)
+L(return):
+ ret
+END(MEMCMP)
-#undef bcmp
+#undef bcmp
weak_alias (memcmp, bcmp)
libc_hidden_builtin_def (memcmp)
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index c573744..679db2a 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -8,7 +8,7 @@ ifeq ($(subdir),string)
sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
strcmp-sse2-unaligned strncmp-ssse3 \
- memcmp-sse4 memcpy-ssse3 \
+ memcpy-ssse3 \
memcpy-sse2-unaligned mempcpy-ssse3 \
memmove-ssse3 memcpy-ssse3-back mempcpy-ssse3-back \
memmove-avx-unaligned memcpy-avx-unaligned mempcpy-avx-unaligned \
@@ -29,10 +29,10 @@ CFLAGS-strspn-c.c += -msse4
endif
ifeq (yes,$(config-cflags-avx2))
-sysdep_routines += memset-avx2 strcpy-avx2 stpcpy-avx2
+sysdep_routines += memset-avx2 strcpy-avx2 stpcpy-avx2 memcmp-avx2
endif
endif
ifeq ($(subdir),wcsmbs)
-sysdep_routines += wmemcmp-sse4 wmemcmp-ssse3 wmemcmp-c wcscpy-ssse3 wcscpy-c
+sysdep_routines += wmemcmp-sse2-unaligned wmemcmp-ssse3 wmemcmp-c wcscpy-ssse3 wcscpy-c
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index d398e43..b3dbe65 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -39,10 +39,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/memcmp.S. */
IFUNC_IMPL (i, name, memcmp,
- IFUNC_IMPL_ADD (array, i, memcmp, HAS_SSE4_1,
- __memcmp_sse4_1)
+ IFUNC_IMPL_ADD (array, i, memcmp, HAS_AVX2, __memcmp_avx2)
IFUNC_IMPL_ADD (array, i, memcmp, HAS_SSSE3, __memcmp_ssse3)
- IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2))
+ IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2_unaligned))
/* Support sysdeps/x86_64/multiarch/memmove_chk.S. */
IFUNC_IMPL (i, name, __memmove_chk,
@@ -211,8 +210,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/wmemcmp.S. */
IFUNC_IMPL (i, name, wmemcmp,
- IFUNC_IMPL_ADD (array, i, wmemcmp, HAS_SSE4_1,
- __wmemcmp_sse4_1)
+ IFUNC_IMPL_ADD (array, i, wmemcmp, 1,
+ __wmemcmp_sse2_unaligned)
IFUNC_IMPL_ADD (array, i, wmemcmp, HAS_SSSE3,
__wmemcmp_ssse3)
IFUNC_IMPL_ADD (array, i, wmemcmp, 1, __wmemcmp_sse2))
diff --git a/sysdeps/x86_64/multiarch/memcmp-avx2.S b/sysdeps/x86_64/multiarch/memcmp-avx2.S
new file mode 100644
index 0000000..60483bf
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memcmp-avx2.S
@@ -0,0 +1,3 @@
+#define USE_AVX2
+#define MEMCMP __memcmp_avx2
+#include "../memcmp.S"
diff --git a/sysdeps/x86_64/multiarch/memcmp-sse4.S b/sysdeps/x86_64/multiarch/memcmp-sse4.S
deleted file mode 100644
index 533fece..0000000
--- a/sysdeps/x86_64/multiarch/memcmp-sse4.S
+++ /dev/null
@@ -1,1776 +0,0 @@
-/* memcmp with SSE4.1, wmemcmp with SSE4.1
- Copyright (C) 2010-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#if IS_IN (libc)
-
-# include <sysdep.h>
-
-# ifndef MEMCMP
-# define MEMCMP __memcmp_sse4_1
-# endif
-
-# define JMPTBL(I, B) (I - B)
-
-# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- lea TABLE(%rip), %r11; \
- movslq (%r11, INDEX, SCALE), %rcx; \
- add %r11, %rcx; \
- jmp *%rcx; \
- ud2
-
-/* Warning!
- wmemcmp has to use SIGNED comparison for elements.
- memcmp has to use UNSIGNED comparison for elemnts.
-*/
-
- .section .text.sse4.1,"ax",@progbits
-ENTRY (MEMCMP)
-# ifdef USE_AS_WMEMCMP
- shl $2, %rdx
-# endif
- pxor %xmm0, %xmm0
- cmp $79, %rdx
- ja L(79bytesormore)
-# ifndef USE_AS_WMEMCMP
- cmp $1, %rdx
- je L(firstbyte)
-# endif
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-# ifndef USE_AS_WMEMCMP
- .p2align 4
-L(firstbyte):
- movzbl (%rdi), %eax
- movzbl (%rsi), %ecx
- sub %ecx, %eax
- ret
-# endif
-
- .p2align 4
-L(79bytesormore):
- movdqu (%rsi), %xmm1
- movdqu (%rdi), %xmm2
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
- mov %rsi, %rcx
- and $-16, %rsi
- add $16, %rsi
- sub %rsi, %rcx
-
- sub %rcx, %rdi
- add %rcx, %rdx
- test $0xf, %rdi
- jz L(2aligned)
-
- cmp $128, %rdx
- ja L(128bytesormore)
-L(less128bytes):
- sub $64, %rdx
-
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqu 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqu 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
- cmp $32, %rdx
- jb L(less32bytesin64)
-
- movdqu 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqu 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin64):
- add $64, %rdi
- add $64, %rsi
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-L(128bytesormore):
- cmp $512, %rdx
- ja L(512bytesormore)
- cmp $256, %rdx
- ja L(less512bytes)
-L(less256bytes):
- sub $128, %rdx
-
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqu 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqu 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
-
- movdqu 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqu 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
-
- movdqu 96(%rdi), %xmm2
- pxor 96(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(112bytesin256)
-
- movdqu 112(%rdi), %xmm2
- pxor 112(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(128bytesin256)
-
- add $128, %rsi
- add $128, %rdi
-
- cmp $64, %rdx
- jae L(less128bytes)
-
- cmp $32, %rdx
- jb L(less32bytesin128)
-
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin128):
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-L(less512bytes):
- sub $256, %rdx
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqu 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqu 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
-
- movdqu 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqu 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
-
- movdqu 96(%rdi), %xmm2
- pxor 96(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(112bytesin256)
-
- movdqu 112(%rdi), %xmm2
- pxor 112(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(128bytesin256)
-
- movdqu 128(%rdi), %xmm2
- pxor 128(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(144bytesin256)
-
- movdqu 144(%rdi), %xmm2
- pxor 144(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(160bytesin256)
-
- movdqu 160(%rdi), %xmm2
- pxor 160(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(176bytesin256)
-
- movdqu 176(%rdi), %xmm2
- pxor 176(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(192bytesin256)
-
- movdqu 192(%rdi), %xmm2
- pxor 192(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(208bytesin256)
-
- movdqu 208(%rdi), %xmm2
- pxor 208(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(224bytesin256)
-
- movdqu 224(%rdi), %xmm2
- pxor 224(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(240bytesin256)
-
- movdqu 240(%rdi), %xmm2
- pxor 240(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(256bytesin256)
-
- add $256, %rsi
- add $256, %rdi
-
- cmp $128, %rdx
- jae L(less256bytes)
-
- cmp $64, %rdx
- jae L(less128bytes)
-
- cmp $32, %rdx
- jb L(less32bytesin256)
-
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin256):
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
- .p2align 4
-L(512bytesormore):
-# ifdef DATA_CACHE_SIZE_HALF
- mov $DATA_CACHE_SIZE_HALF, %R8_LP
-# else
- mov __x86_data_cache_size_half(%rip), %R8_LP
-# endif
- mov %r8, %r9
- shr $1, %r8
- add %r9, %r8
- cmp %r8, %rdx
- ja L(L2_L3_cache_unaglined)
- sub $64, %rdx
- .p2align 4
-L(64bytesormore_loop):
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- movdqa %xmm2, %xmm1
-
- movdqu 16(%rdi), %xmm3
- pxor 16(%rsi), %xmm3
- por %xmm3, %xmm1
-
- movdqu 32(%rdi), %xmm4
- pxor 32(%rsi), %xmm4
- por %xmm4, %xmm1
-
- movdqu 48(%rdi), %xmm5
- pxor 48(%rsi), %xmm5
- por %xmm5, %xmm1
-
- ptest %xmm1, %xmm0
- jnc L(64bytesormore_loop_end)
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- jae L(64bytesormore_loop)
-
- add $64, %rdx
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-L(L2_L3_cache_unaglined):
- sub $64, %rdx
- .p2align 4
-L(L2_L3_unaligned_128bytes_loop):
- prefetchnta 0x1c0(%rdi)
- prefetchnta 0x1c0(%rsi)
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- movdqa %xmm2, %xmm1
-
- movdqu 16(%rdi), %xmm3
- pxor 16(%rsi), %xmm3
- por %xmm3, %xmm1
-
- movdqu 32(%rdi), %xmm4
- pxor 32(%rsi), %xmm4
- por %xmm4, %xmm1
-
- movdqu 48(%rdi), %xmm5
- pxor 48(%rsi), %xmm5
- por %xmm5, %xmm1
-
- ptest %xmm1, %xmm0
- jnc L(64bytesormore_loop_end)
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- jae L(L2_L3_unaligned_128bytes_loop)
-
- add $64, %rdx
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-/*
- * This case is for machines which are sensitive for unaligned instructions.
- */
- .p2align 4
-L(2aligned):
- cmp $128, %rdx
- ja L(128bytesormorein2aligned)
-L(less128bytesin2aligned):
- sub $64, %rdx
-
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqa 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqa 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqa 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
- cmp $32, %rdx
- jb L(less32bytesin64in2alinged)
-
- movdqa 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqa 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin64in2alinged):
- add $64, %rdi
- add $64, %rsi
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
- .p2align 4
-L(128bytesormorein2aligned):
- cmp $512, %rdx
- ja L(512bytesormorein2aligned)
- cmp $256, %rdx
- ja L(256bytesormorein2aligned)
-L(less256bytesin2alinged):
- sub $128, %rdx
-
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqa 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqa 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqa 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
-
- movdqa 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqa 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
-
- movdqa 96(%rdi), %xmm2
- pxor 96(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(112bytesin256)
-
- movdqa 112(%rdi), %xmm2
- pxor 112(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(128bytesin256)
-
- add $128, %rsi
- add $128, %rdi
-
- cmp $64, %rdx
- jae L(less128bytesin2aligned)
-
- cmp $32, %rdx
- jb L(less32bytesin128in2aligned)
-
- movdqu (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqu 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin128in2aligned):
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
- .p2align 4
-L(256bytesormorein2aligned):
-
- sub $256, %rdx
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqa 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
-
- movdqa 32(%rdi), %xmm2
- pxor 32(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(48bytesin256)
-
- movdqa 48(%rdi), %xmm2
- pxor 48(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(64bytesin256)
-
- movdqa 64(%rdi), %xmm2
- pxor 64(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(80bytesin256)
-
- movdqa 80(%rdi), %xmm2
- pxor 80(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(96bytesin256)
-
- movdqa 96(%rdi), %xmm2
- pxor 96(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(112bytesin256)
-
- movdqa 112(%rdi), %xmm2
- pxor 112(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(128bytesin256)
-
- movdqa 128(%rdi), %xmm2
- pxor 128(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(144bytesin256)
-
- movdqa 144(%rdi), %xmm2
- pxor 144(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(160bytesin256)
-
- movdqa 160(%rdi), %xmm2
- pxor 160(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(176bytesin256)
-
- movdqa 176(%rdi), %xmm2
- pxor 176(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(192bytesin256)
-
- movdqa 192(%rdi), %xmm2
- pxor 192(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(208bytesin256)
-
- movdqa 208(%rdi), %xmm2
- pxor 208(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(224bytesin256)
-
- movdqa 224(%rdi), %xmm2
- pxor 224(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(240bytesin256)
-
- movdqa 240(%rdi), %xmm2
- pxor 240(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(256bytesin256)
-
- add $256, %rsi
- add $256, %rdi
-
- cmp $128, %rdx
- jae L(less256bytesin2alinged)
-
- cmp $64, %rdx
- jae L(less128bytesin2aligned)
-
- cmp $32, %rdx
- jb L(less32bytesin256in2alinged)
-
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(16bytesin256)
-
- movdqa 16(%rdi), %xmm2
- pxor 16(%rsi), %xmm2
- ptest %xmm2, %xmm0
- jnc L(32bytesin256)
- sub $32, %rdx
- add $32, %rdi
- add $32, %rsi
-L(less32bytesin256in2alinged):
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
- .p2align 4
-L(512bytesormorein2aligned):
-# ifdef DATA_CACHE_SIZE_HALF
- mov $DATA_CACHE_SIZE_HALF, %R8_LP
-# else
- mov __x86_data_cache_size_half(%rip), %R8_LP
-# endif
- mov %r8, %r9
- shr $1, %r8
- add %r9, %r8
- cmp %r8, %rdx
- ja L(L2_L3_cache_aglined)
-
- sub $64, %rdx
- .p2align 4
-L(64bytesormore_loopin2aligned):
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- movdqa %xmm2, %xmm1
-
- movdqa 16(%rdi), %xmm3
- pxor 16(%rsi), %xmm3
- por %xmm3, %xmm1
-
- movdqa 32(%rdi), %xmm4
- pxor 32(%rsi), %xmm4
- por %xmm4, %xmm1
-
- movdqa 48(%rdi), %xmm5
- pxor 48(%rsi), %xmm5
- por %xmm5, %xmm1
-
- ptest %xmm1, %xmm0
- jnc L(64bytesormore_loop_end)
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- jae L(64bytesormore_loopin2aligned)
-
- add $64, %rdx
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-L(L2_L3_cache_aglined):
- sub $64, %rdx
-
- .p2align 4
-L(L2_L3_aligned_128bytes_loop):
- prefetchnta 0x1c0(%rdi)
- prefetchnta 0x1c0(%rsi)
- movdqa (%rdi), %xmm2
- pxor (%rsi), %xmm2
- movdqa %xmm2, %xmm1
-
- movdqa 16(%rdi), %xmm3
- pxor 16(%rsi), %xmm3
- por %xmm3, %xmm1
-
- movdqa 32(%rdi), %xmm4
- pxor 32(%rsi), %xmm4
- por %xmm4, %xmm1
-
- movdqa 48(%rdi), %xmm5
- pxor 48(%rsi), %xmm5
- por %xmm5, %xmm1
-
- ptest %xmm1, %xmm0
- jnc L(64bytesormore_loop_end)
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- jae L(L2_L3_aligned_128bytes_loop)
-
- add $64, %rdx
- add %rdx, %rsi
- add %rdx, %rdi
- BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 4)
-
-
- .p2align 4
-L(64bytesormore_loop_end):
- add $16, %rdi
- add $16, %rsi
- ptest %xmm2, %xmm0
- jnc L(16bytes)
-
- add $16, %rdi
- add $16, %rsi
- ptest %xmm3, %xmm0
- jnc L(16bytes)
-
- add $16, %rdi
- add $16, %rsi
- ptest %xmm4, %xmm0
- jnc L(16bytes)
-
- add $16, %rdi
- add $16, %rsi
- jmp L(16bytes)
-
-L(256bytesin256):
- add $256, %rdi
- add $256, %rsi
- jmp L(16bytes)
-L(240bytesin256):
- add $240, %rdi
- add $240, %rsi
- jmp L(16bytes)
-L(224bytesin256):
- add $224, %rdi
- add $224, %rsi
- jmp L(16bytes)
-L(208bytesin256):
- add $208, %rdi
- add $208, %rsi
- jmp L(16bytes)
-L(192bytesin256):
- add $192, %rdi
- add $192, %rsi
- jmp L(16bytes)
-L(176bytesin256):
- add $176, %rdi
- add $176, %rsi
- jmp L(16bytes)
-L(160bytesin256):
- add $160, %rdi
- add $160, %rsi
- jmp L(16bytes)
-L(144bytesin256):
- add $144, %rdi
- add $144, %rsi
- jmp L(16bytes)
-L(128bytesin256):
- add $128, %rdi
- add $128, %rsi
- jmp L(16bytes)
-L(112bytesin256):
- add $112, %rdi
- add $112, %rsi
- jmp L(16bytes)
-L(96bytesin256):
- add $96, %rdi
- add $96, %rsi
- jmp L(16bytes)
-L(80bytesin256):
- add $80, %rdi
- add $80, %rsi
- jmp L(16bytes)
-L(64bytesin256):
- add $64, %rdi
- add $64, %rsi
- jmp L(16bytes)
-L(48bytesin256):
- add $16, %rdi
- add $16, %rsi
-L(32bytesin256):
- add $16, %rdi
- add $16, %rsi
-L(16bytesin256):
- add $16, %rdi
- add $16, %rsi
-L(16bytes):
- mov -16(%rdi), %rax
- mov -16(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-L(8bytes):
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(12bytes):
- mov -12(%rdi), %rax
- mov -12(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-L(4bytes):
- mov -4(%rsi), %ecx
-# ifndef USE_AS_WMEMCMP
- mov -4(%rdi), %eax
- cmp %eax, %ecx
-# else
- cmp -4(%rdi), %ecx
-# endif
- jne L(diffin4bytes)
-L(0bytes):
- xor %eax, %eax
- ret
-
-# ifndef USE_AS_WMEMCMP
-/* unreal case for wmemcmp */
- .p2align 4
-L(65bytes):
- movdqu -65(%rdi), %xmm1
- movdqu -65(%rsi), %xmm2
- mov $-65, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(49bytes):
- movdqu -49(%rdi), %xmm1
- movdqu -49(%rsi), %xmm2
- mov $-49, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(33bytes):
- movdqu -33(%rdi), %xmm1
- movdqu -33(%rsi), %xmm2
- mov $-33, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(17bytes):
- mov -17(%rdi), %rax
- mov -17(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-L(9bytes):
- mov -9(%rdi), %rax
- mov -9(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- movzbl -1(%rdi), %eax
- movzbl -1(%rsi), %edx
- sub %edx, %eax
- ret
-
- .p2align 4
-L(13bytes):
- mov -13(%rdi), %rax
- mov -13(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(5bytes):
- mov -5(%rdi), %eax
- mov -5(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
- movzbl -1(%rdi), %eax
- movzbl -1(%rsi), %edx
- sub %edx, %eax
- ret
-
- .p2align 4
-L(66bytes):
- movdqu -66(%rdi), %xmm1
- movdqu -66(%rsi), %xmm2
- mov $-66, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(50bytes):
- movdqu -50(%rdi), %xmm1
- movdqu -50(%rsi), %xmm2
- mov $-50, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(34bytes):
- movdqu -34(%rdi), %xmm1
- movdqu -34(%rsi), %xmm2
- mov $-34, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(18bytes):
- mov -18(%rdi), %rax
- mov -18(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-L(10bytes):
- mov -10(%rdi), %rax
- mov -10(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- movzwl -2(%rdi), %eax
- movzwl -2(%rsi), %ecx
- cmp %cl, %al
- jne L(end)
- and $0xffff, %eax
- and $0xffff, %ecx
- sub %ecx, %eax
- ret
-
- .p2align 4
-L(14bytes):
- mov -14(%rdi), %rax
- mov -14(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(6bytes):
- mov -6(%rdi), %eax
- mov -6(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
-L(2bytes):
- movzwl -2(%rsi), %ecx
- movzwl -2(%rdi), %eax
- cmp %cl, %al
- jne L(end)
- and $0xffff, %eax
- and $0xffff, %ecx
- sub %ecx, %eax
- ret
-
- .p2align 4
-L(67bytes):
- movdqu -67(%rdi), %xmm2
- movdqu -67(%rsi), %xmm1
- mov $-67, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(51bytes):
- movdqu -51(%rdi), %xmm2
- movdqu -51(%rsi), %xmm1
- mov $-51, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(35bytes):
- movdqu -35(%rsi), %xmm1
- movdqu -35(%rdi), %xmm2
- mov $-35, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(19bytes):
- mov -19(%rdi), %rax
- mov -19(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-L(11bytes):
- mov -11(%rdi), %rax
- mov -11(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -4(%rdi), %eax
- mov -4(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(15bytes):
- mov -15(%rdi), %rax
- mov -15(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(7bytes):
- mov -7(%rdi), %eax
- mov -7(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
- mov -4(%rdi), %eax
- mov -4(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(3bytes):
- movzwl -3(%rdi), %eax
- movzwl -3(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin2bytes)
-L(1bytes):
- movzbl -1(%rdi), %eax
- movzbl -1(%rsi), %ecx
- sub %ecx, %eax
- ret
-# endif
-
- .p2align 4
-L(68bytes):
- movdqu -68(%rdi), %xmm2
- movdqu -68(%rsi), %xmm1
- mov $-68, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(52bytes):
- movdqu -52(%rdi), %xmm2
- movdqu -52(%rsi), %xmm1
- mov $-52, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(36bytes):
- movdqu -36(%rdi), %xmm2
- movdqu -36(%rsi), %xmm1
- mov $-36, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(20bytes):
- movdqu -20(%rdi), %xmm2
- movdqu -20(%rsi), %xmm1
- mov $-20, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -4(%rsi), %ecx
-
-# ifndef USE_AS_WMEMCMP
- mov -4(%rdi), %eax
- cmp %eax, %ecx
-# else
- cmp -4(%rdi), %ecx
-# endif
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-
-# ifndef USE_AS_WMEMCMP
-/* unreal cases for wmemcmp */
- .p2align 4
-L(69bytes):
- movdqu -69(%rsi), %xmm1
- movdqu -69(%rdi), %xmm2
- mov $-69, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(53bytes):
- movdqu -53(%rsi), %xmm1
- movdqu -53(%rdi), %xmm2
- mov $-53, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(37bytes):
- movdqu -37(%rsi), %xmm1
- movdqu -37(%rdi), %xmm2
- mov $-37, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(21bytes):
- movdqu -21(%rsi), %xmm1
- movdqu -21(%rdi), %xmm2
- mov $-21, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(70bytes):
- movdqu -70(%rsi), %xmm1
- movdqu -70(%rdi), %xmm2
- mov $-70, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(54bytes):
- movdqu -54(%rsi), %xmm1
- movdqu -54(%rdi), %xmm2
- mov $-54, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(38bytes):
- movdqu -38(%rsi), %xmm1
- movdqu -38(%rdi), %xmm2
- mov $-38, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(22bytes):
- movdqu -22(%rsi), %xmm1
- movdqu -22(%rdi), %xmm2
- mov $-22, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(71bytes):
- movdqu -71(%rsi), %xmm1
- movdqu -71(%rdi), %xmm2
- mov $-71, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(55bytes):
- movdqu -55(%rdi), %xmm2
- movdqu -55(%rsi), %xmm1
- mov $-55, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(39bytes):
- movdqu -39(%rdi), %xmm2
- movdqu -39(%rsi), %xmm1
- mov $-39, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(23bytes):
- movdqu -23(%rdi), %xmm2
- movdqu -23(%rsi), %xmm1
- mov $-23, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-# endif
-
- .p2align 4
-L(72bytes):
- movdqu -72(%rsi), %xmm1
- movdqu -72(%rdi), %xmm2
- mov $-72, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(56bytes):
- movdqu -56(%rdi), %xmm2
- movdqu -56(%rsi), %xmm1
- mov $-56, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(40bytes):
- movdqu -40(%rdi), %xmm2
- movdqu -40(%rsi), %xmm1
- mov $-40, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(24bytes):
- movdqu -24(%rdi), %xmm2
- movdqu -24(%rsi), %xmm1
- mov $-24, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-
- mov -8(%rsi), %rcx
- mov -8(%rdi), %rax
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
-# ifndef USE_AS_WMEMCMP
-/* unreal cases for wmemcmp */
- .p2align 4
-L(73bytes):
- movdqu -73(%rsi), %xmm1
- movdqu -73(%rdi), %xmm2
- mov $-73, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(57bytes):
- movdqu -57(%rdi), %xmm2
- movdqu -57(%rsi), %xmm1
- mov $-57, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(41bytes):
- movdqu -41(%rdi), %xmm2
- movdqu -41(%rsi), %xmm1
- mov $-41, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(25bytes):
- movdqu -25(%rdi), %xmm2
- movdqu -25(%rsi), %xmm1
- mov $-25, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -9(%rdi), %rax
- mov -9(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- movzbl -1(%rdi), %eax
- movzbl -1(%rsi), %ecx
- sub %ecx, %eax
- ret
-
- .p2align 4
-L(74bytes):
- movdqu -74(%rsi), %xmm1
- movdqu -74(%rdi), %xmm2
- mov $-74, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(58bytes):
- movdqu -58(%rdi), %xmm2
- movdqu -58(%rsi), %xmm1
- mov $-58, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(42bytes):
- movdqu -42(%rdi), %xmm2
- movdqu -42(%rsi), %xmm1
- mov $-42, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(26bytes):
- movdqu -26(%rdi), %xmm2
- movdqu -26(%rsi), %xmm1
- mov $-26, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -10(%rdi), %rax
- mov -10(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- movzwl -2(%rdi), %eax
- movzwl -2(%rsi), %ecx
- jmp L(diffin2bytes)
-
- .p2align 4
-L(75bytes):
- movdqu -75(%rsi), %xmm1
- movdqu -75(%rdi), %xmm2
- mov $-75, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(59bytes):
- movdqu -59(%rdi), %xmm2
- movdqu -59(%rsi), %xmm1
- mov $-59, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(43bytes):
- movdqu -43(%rdi), %xmm2
- movdqu -43(%rsi), %xmm1
- mov $-43, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(27bytes):
- movdqu -27(%rdi), %xmm2
- movdqu -27(%rsi), %xmm1
- mov $-27, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -11(%rdi), %rax
- mov -11(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -4(%rdi), %eax
- mov -4(%rsi), %ecx
- cmp %eax, %ecx
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-# endif
- .p2align 4
-L(76bytes):
- movdqu -76(%rsi), %xmm1
- movdqu -76(%rdi), %xmm2
- mov $-76, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(60bytes):
- movdqu -60(%rdi), %xmm2
- movdqu -60(%rsi), %xmm1
- mov $-60, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(44bytes):
- movdqu -44(%rdi), %xmm2
- movdqu -44(%rsi), %xmm1
- mov $-44, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(28bytes):
- movdqu -28(%rdi), %xmm2
- movdqu -28(%rsi), %xmm1
- mov $-28, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -12(%rdi), %rax
- mov -12(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -4(%rsi), %ecx
-# ifndef USE_AS_WMEMCMP
- mov -4(%rdi), %eax
- cmp %eax, %ecx
-# else
- cmp -4(%rdi), %ecx
-# endif
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-
-# ifndef USE_AS_WMEMCMP
-/* unreal cases for wmemcmp */
- .p2align 4
-L(77bytes):
- movdqu -77(%rsi), %xmm1
- movdqu -77(%rdi), %xmm2
- mov $-77, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(61bytes):
- movdqu -61(%rdi), %xmm2
- movdqu -61(%rsi), %xmm1
- mov $-61, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(45bytes):
- movdqu -45(%rdi), %xmm2
- movdqu -45(%rsi), %xmm1
- mov $-45, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(29bytes):
- movdqu -29(%rdi), %xmm2
- movdqu -29(%rsi), %xmm1
- mov $-29, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-
- mov -13(%rdi), %rax
- mov -13(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(78bytes):
- movdqu -78(%rsi), %xmm1
- movdqu -78(%rdi), %xmm2
- mov $-78, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(62bytes):
- movdqu -62(%rdi), %xmm2
- movdqu -62(%rsi), %xmm1
- mov $-62, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(46bytes):
- movdqu -46(%rdi), %xmm2
- movdqu -46(%rsi), %xmm1
- mov $-46, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(30bytes):
- movdqu -30(%rdi), %xmm2
- movdqu -30(%rsi), %xmm1
- mov $-30, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -14(%rdi), %rax
- mov -14(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
- .p2align 4
-L(79bytes):
- movdqu -79(%rsi), %xmm1
- movdqu -79(%rdi), %xmm2
- mov $-79, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(63bytes):
- movdqu -63(%rdi), %xmm2
- movdqu -63(%rsi), %xmm1
- mov $-63, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(47bytes):
- movdqu -47(%rdi), %xmm2
- movdqu -47(%rsi), %xmm1
- mov $-47, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(31bytes):
- movdqu -31(%rdi), %xmm2
- movdqu -31(%rsi), %xmm1
- mov $-31, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
- mov -15(%rdi), %rax
- mov -15(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-# endif
- .p2align 4
-L(64bytes):
- movdqu -64(%rdi), %xmm2
- movdqu -64(%rsi), %xmm1
- mov $-64, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(48bytes):
- movdqu -48(%rdi), %xmm2
- movdqu -48(%rsi), %xmm1
- mov $-48, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-L(32bytes):
- movdqu -32(%rdi), %xmm2
- movdqu -32(%rsi), %xmm1
- mov $-32, %dl
- pxor %xmm1, %xmm2
- ptest %xmm2, %xmm0
- jnc L(less16bytes)
-
- mov -16(%rdi), %rax
- mov -16(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
-
- mov -8(%rdi), %rax
- mov -8(%rsi), %rcx
- cmp %rax, %rcx
- jne L(diffin8bytes)
- xor %eax, %eax
- ret
-
-/*
- * Aligned 8 bytes to avoid 2 branch "taken" in one 16 alinged code block.
- */
- .p2align 3
-L(less16bytes):
- movsbq %dl, %rdx
- mov (%rsi, %rdx), %rcx
- mov (%rdi, %rdx), %rax
- cmp %rax, %rcx
- jne L(diffin8bytes)
- mov 8(%rsi, %rdx), %rcx
- mov 8(%rdi, %rdx), %rax
-L(diffin8bytes):
- cmp %eax, %ecx
- jne L(diffin4bytes)
- shr $32, %rcx
- shr $32, %rax
-
-# ifdef USE_AS_WMEMCMP
-/* for wmemcmp */
- cmp %eax, %ecx
- jne L(diffin4bytes)
- xor %eax, %eax
- ret
-# endif
-
-L(diffin4bytes):
-# ifndef USE_AS_WMEMCMP
- cmp %cx, %ax
- jne L(diffin2bytes)
- shr $16, %ecx
- shr $16, %eax
-L(diffin2bytes):
- cmp %cl, %al
- jne L(end)
- and $0xffff, %eax
- and $0xffff, %ecx
- sub %ecx, %eax
- ret
-
- .p2align 4
-L(end):
- and $0xff, %eax
- and $0xff, %ecx
- sub %ecx, %eax
- ret
-# else
-
-/* for wmemcmp */
- mov $1, %eax
- jl L(nequal_bigger)
- neg %eax
- ret
-
- .p2align 4
-L(nequal_bigger):
- ret
-
-L(unreal_case):
- xor %eax, %eax
- ret
-# endif
-
-END (MEMCMP)
-
- .section .rodata.sse4.1,"a",@progbits
- .p2align 3
-# ifndef USE_AS_WMEMCMP
-L(table_64bytes):
- .int JMPTBL (L(0bytes), L(table_64bytes))
- .int JMPTBL (L(1bytes), L(table_64bytes))
- .int JMPTBL (L(2bytes), L(table_64bytes))
- .int JMPTBL (L(3bytes), L(table_64bytes))
- .int JMPTBL (L(4bytes), L(table_64bytes))
- .int JMPTBL (L(5bytes), L(table_64bytes))
- .int JMPTBL (L(6bytes), L(table_64bytes))
- .int JMPTBL (L(7bytes), L(table_64bytes))
- .int JMPTBL (L(8bytes), L(table_64bytes))
- .int JMPTBL (L(9bytes), L(table_64bytes))
- .int JMPTBL (L(10bytes), L(table_64bytes))
- .int JMPTBL (L(11bytes), L(table_64bytes))
- .int JMPTBL (L(12bytes), L(table_64bytes))
- .int JMPTBL (L(13bytes), L(table_64bytes))
- .int JMPTBL (L(14bytes), L(table_64bytes))
- .int JMPTBL (L(15bytes), L(table_64bytes))
- .int JMPTBL (L(16bytes), L(table_64bytes))
- .int JMPTBL (L(17bytes), L(table_64bytes))
- .int JMPTBL (L(18bytes), L(table_64bytes))
- .int JMPTBL (L(19bytes), L(table_64bytes))
- .int JMPTBL (L(20bytes), L(table_64bytes))
- .int JMPTBL (L(21bytes), L(table_64bytes))
- .int JMPTBL (L(22bytes), L(table_64bytes))
- .int JMPTBL (L(23bytes), L(table_64bytes))
- .int JMPTBL (L(24bytes), L(table_64bytes))
- .int JMPTBL (L(25bytes), L(table_64bytes))
- .int JMPTBL (L(26bytes), L(table_64bytes))
- .int JMPTBL (L(27bytes), L(table_64bytes))
- .int JMPTBL (L(28bytes), L(table_64bytes))
- .int JMPTBL (L(29bytes), L(table_64bytes))
- .int JMPTBL (L(30bytes), L(table_64bytes))
- .int JMPTBL (L(31bytes), L(table_64bytes))
- .int JMPTBL (L(32bytes), L(table_64bytes))
- .int JMPTBL (L(33bytes), L(table_64bytes))
- .int JMPTBL (L(34bytes), L(table_64bytes))
- .int JMPTBL (L(35bytes), L(table_64bytes))
- .int JMPTBL (L(36bytes), L(table_64bytes))
- .int JMPTBL (L(37bytes), L(table_64bytes))
- .int JMPTBL (L(38bytes), L(table_64bytes))
- .int JMPTBL (L(39bytes), L(table_64bytes))
- .int JMPTBL (L(40bytes), L(table_64bytes))
- .int JMPTBL (L(41bytes), L(table_64bytes))
- .int JMPTBL (L(42bytes), L(table_64bytes))
- .int JMPTBL (L(43bytes), L(table_64bytes))
- .int JMPTBL (L(44bytes), L(table_64bytes))
- .int JMPTBL (L(45bytes), L(table_64bytes))
- .int JMPTBL (L(46bytes), L(table_64bytes))
- .int JMPTBL (L(47bytes), L(table_64bytes))
- .int JMPTBL (L(48bytes), L(table_64bytes))
- .int JMPTBL (L(49bytes), L(table_64bytes))
- .int JMPTBL (L(50bytes), L(table_64bytes))
- .int JMPTBL (L(51bytes), L(table_64bytes))
- .int JMPTBL (L(52bytes), L(table_64bytes))
- .int JMPTBL (L(53bytes), L(table_64bytes))
- .int JMPTBL (L(54bytes), L(table_64bytes))
- .int JMPTBL (L(55bytes), L(table_64bytes))
- .int JMPTBL (L(56bytes), L(table_64bytes))
- .int JMPTBL (L(57bytes), L(table_64bytes))
- .int JMPTBL (L(58bytes), L(table_64bytes))
- .int JMPTBL (L(59bytes), L(table_64bytes))
- .int JMPTBL (L(60bytes), L(table_64bytes))
- .int JMPTBL (L(61bytes), L(table_64bytes))
- .int JMPTBL (L(62bytes), L(table_64bytes))
- .int JMPTBL (L(63bytes), L(table_64bytes))
- .int JMPTBL (L(64bytes), L(table_64bytes))
- .int JMPTBL (L(65bytes), L(table_64bytes))
- .int JMPTBL (L(66bytes), L(table_64bytes))
- .int JMPTBL (L(67bytes), L(table_64bytes))
- .int JMPTBL (L(68bytes), L(table_64bytes))
- .int JMPTBL (L(69bytes), L(table_64bytes))
- .int JMPTBL (L(70bytes), L(table_64bytes))
- .int JMPTBL (L(71bytes), L(table_64bytes))
- .int JMPTBL (L(72bytes), L(table_64bytes))
- .int JMPTBL (L(73bytes), L(table_64bytes))
- .int JMPTBL (L(74bytes), L(table_64bytes))
- .int JMPTBL (L(75bytes), L(table_64bytes))
- .int JMPTBL (L(76bytes), L(table_64bytes))
- .int JMPTBL (L(77bytes), L(table_64bytes))
- .int JMPTBL (L(78bytes), L(table_64bytes))
- .int JMPTBL (L(79bytes), L(table_64bytes))
-# else
-L(table_64bytes):
- .int JMPTBL (L(0bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(4bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(8bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(12bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(16bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(20bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(24bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(28bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(32bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(36bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(40bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(44bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(48bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(52bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(56bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(60bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(64bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(68bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(72bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(76bytes), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
- .int JMPTBL (L(unreal_case), L(table_64bytes))
-# endif
-#endif
diff --git a/sysdeps/x86_64/multiarch/memcmp.S b/sysdeps/x86_64/multiarch/memcmp.S
index f8b4636..5d87a17 100644
--- a/sysdeps/x86_64/multiarch/memcmp.S
+++ b/sysdeps/x86_64/multiarch/memcmp.S
@@ -29,33 +29,28 @@ ENTRY(memcmp)
cmpl $0, KIND_OFFSET+__cpu_features(%rip)
jne 1f
call __init_cpu_features
-
-1: testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
+ testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
jnz 2f
- leaq __memcmp_sse2(%rip), %rax
- ret
-
-2: testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
- jz 3f
- leaq __memcmp_sse4_1(%rip), %rax
+1: testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
+ jnz 3f
+2: leaq __memcmp_sse2_unaligned(%rip), %rax
ret
3: leaq __memcmp_ssse3(%rip), %rax
ret
-
END(memcmp)
# undef ENTRY
# define ENTRY(name) \
- .type __memcmp_sse2, @function; \
+ .type __memcmp_sse2_unaligned, @function; \
.p2align 4; \
- .globl __memcmp_sse2; \
- .hidden __memcmp_sse2; \
- __memcmp_sse2: cfi_startproc; \
+ .globl __memcmp_sse2_unaligned; \
+ .hidden __memcmp_sse2_unaligned; \
+ __memcmp_sse2_unaligned: cfi_startproc; \
CALL_MCOUNT
# undef END
# define END(name) \
- cfi_endproc; .size __memcmp_sse2, .-__memcmp_sse2
+ cfi_endproc; .size __memcmp_sse2_unaligned, .-__memcmp_sse2_unaligned
# ifdef SHARED
# undef libc_hidden_builtin_def
@@ -63,7 +58,7 @@ END(memcmp)
they will be called without setting up EBX needed for PLT which is
used by IFUNC. */
# define libc_hidden_builtin_def(name) \
- .globl __GI_memcmp; __GI_memcmp = __memcmp_sse2
+ .globl __GI_memcmp; __GI_memcmp = __memcmp_sse2_unaligned
# endif
#endif
diff --git a/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
index 695a236..5dd8d44 100644
--- a/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
@@ -201,6 +201,10 @@ L(prepare_loop):
movdqu %xmm2, 96(%rdi)
movdqu %xmm3, 112(%rdi)
+#ifdef USE_AVX2
+ vpxor %xmm5, %xmm5, %xmm5
+#endif
+
subq %rsi, %rdi
add $64, %rsi
andq $-64, %rsi
@@ -348,10 +352,13 @@ L(cross_loop):
sub $1, %rcx
ja L(cross_loop)
+#ifdef USE_AVX2
+ vpxor %xmm5, %xmm5, %xmm5
+#else
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
-
+#endif
lea -64(%rsi), %rdx
andq $-64, %rdx
addq %rdx, %rdi
diff --git a/sysdeps/x86_64/multiarch/wmemcmp-sse2-unaligned.S b/sysdeps/x86_64/multiarch/wmemcmp-sse2-unaligned.S
new file mode 100644
index 0000000..575f92e
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemcmp-sse2-unaligned.S
@@ -0,0 +1,3 @@
+#define MEMCMP __wmemcmp_sse2_unaligned
+#define AS_WMEMCMP
+#include "../memcmp.S"
diff --git a/sysdeps/x86_64/multiarch/wmemcmp-sse4.S b/sysdeps/x86_64/multiarch/wmemcmp-sse4.S
deleted file mode 100644
index b07973a..0000000
--- a/sysdeps/x86_64/multiarch/wmemcmp-sse4.S
+++ /dev/null
@@ -1,4 +0,0 @@
-#define USE_AS_WMEMCMP 1
-#define MEMCMP __wmemcmp_sse4_1
-
-#include "memcmp-sse4.S"
diff --git a/sysdeps/x86_64/multiarch/wmemcmp.S b/sysdeps/x86_64/multiarch/wmemcmp.S
index 109e245..dabd3ed 100644
--- a/sysdeps/x86_64/multiarch/wmemcmp.S
+++ b/sysdeps/x86_64/multiarch/wmemcmp.S
@@ -30,18 +30,16 @@ ENTRY(wmemcmp)
jne 1f
call __init_cpu_features
-1: testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
+ testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
jnz 2f
- leaq __wmemcmp_sse2(%rip), %rax
- ret
-
-2: testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
- jz 3f
- leaq __wmemcmp_sse4_1(%rip), %rax
+1: testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
+ jnz 3f
+2: leaq __wmemcmp_sse2_unaligned(%rip), %rax
ret
3: leaq __wmemcmp_ssse3(%rip), %rax
ret
+
END(wmemcmp)
#endif
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d0731dac4e35206d4cd7a512e357ef66353b3581
commit d0731dac4e35206d4cd7a512e357ef66353b3581
Author: Ondřej Bílka <neleai@seznam.cz>
Date: Wed Jun 17 15:32:54 2015 +0200
new sse2 and avx2 strcpy and stpcpy
diff --git a/math/Makefile b/math/Makefile
index 7f6b85e..143fa47 100644
--- a/math/Makefile
+++ b/math/Makefile
@@ -115,7 +115,7 @@ tests-static = test-fpucw-static test-fpucw-ieee-static
test-longdouble-yes = test-ldouble test-ildoubl
ifneq (no,$(PERL))
-libm-vec-tests = $(addprefix test-,$(libmvec-tests))
+#libm-vec-tests = $(addprefix test-,$(libmvec-tests))
libm-tests = test-float test-double $(test-longdouble-$(long-double-fcts)) \
test-ifloat test-idouble $(libm-vec-tests)
libm-tests.o = $(addsuffix .o,$(libm-tests))
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index d7002a9..c573744 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -29,7 +29,7 @@ CFLAGS-strspn-c.c += -msse4
endif
ifeq (yes,$(config-cflags-avx2))
-sysdep_routines += memset-avx2
+sysdep_routines += memset-avx2 strcpy-avx2 stpcpy-avx2
endif
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index b64e4f1..d398e43 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -88,6 +88,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/stpcpy.S. */
IFUNC_IMPL (i, name, stpcpy,
+ IFUNC_IMPL_ADD (array, i, strcpy, HAS_AVX2, __stpcpy_avx2)
IFUNC_IMPL_ADD (array, i, stpcpy, HAS_SSSE3, __stpcpy_ssse3)
IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2_unaligned)
IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2))
@@ -137,6 +138,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/x86_64/multiarch/strcpy.S. */
IFUNC_IMPL (i, name, strcpy,
+ IFUNC_IMPL_ADD (array, i, strcpy, HAS_AVX2, __strcpy_avx2)
IFUNC_IMPL_ADD (array, i, strcpy, HAS_SSSE3, __strcpy_ssse3)
IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2_unaligned)
IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2))
diff --git a/sysdeps/x86_64/multiarch/stpcpy-avx2.S b/sysdeps/x86_64/multiarch/stpcpy-avx2.S
new file mode 100644
index 0000000..bd30ef6
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/stpcpy-avx2.S
@@ -0,0 +1,3 @@
+#define USE_AVX2
+#define STPCPY __stpcpy_avx2
+#include "stpcpy-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
index 34231f8..695a236 100644
--- a/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/stpcpy-sse2-unaligned.S
@@ -1,3 +1,436 @@
-#define USE_AS_STPCPY
-#define STRCPY __stpcpy_sse2_unaligned
-#include "strcpy-sse2-unaligned.S"
+/* stpcpy with SSE2 and unaligned load
+ Copyright (C) 2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#ifndef STPCPY
+# define STPCPY __stpcpy_sse2_unaligned
+#endif
+
+ENTRY(STPCPY)
+ mov %esi, %edx
+#ifdef AS_STRCPY
+ movq %rdi, %rax
+#endif
+ pxor %xmm4, %xmm4
+ pxor %xmm5, %xmm5
+ andl $4095, %edx
+ cmp $3968, %edx
+ ja L(cross_page)
+
+ movdqu (%rsi), %xmm0
+ pcmpeqb %xmm0, %xmm4
+ pmovmskb %xmm4, %edx
+ testl %edx, %edx
+ je L(more16bytes)
+ bsf %edx, %ecx
+#ifndef AS_STRCPY
+ lea (%rdi, %rcx), %rax
+#endif
+ cmp $7, %ecx
+ movq (%rsi), %rdx
+ jb L(less_8_bytesb)
+L(8bytes_from_cross):
+ movq -7(%rsi, %rcx), %rsi
+ movq %rdx, (%rdi)
+#ifdef AS_STRCPY
+ movq %rsi, -7(%rdi, %rcx)
+#else
+ movq %rsi, -7(%rax)
+#endif
+ ret
+
+ .p2align 4
+L(less_8_bytesb):
+ cmp $2, %ecx
+ jbe L(less_4_bytes)
+L(4bytes_from_cross):
+ mov -3(%rsi, %rcx), %esi
+ mov %edx, (%rdi)
+#ifdef AS_STRCPY
+ mov %esi, -3(%rdi, %rcx)
+#else
+ mov %esi, -3(%rax)
+#endif
+ ret
+
+.p2align 4
+ L(less_4_bytes):
+ /*
+ Test branch vs this branchless that works for i 0,1,2
+ d[i] = 0;
+ d[i/2] = s[1];
+ d[0] = s[0];
+ */
+#ifdef AS_STRCPY
+ movb $0, (%rdi, %rcx)
+#endif
+
+ shr $1, %ecx
+ mov %edx, %esi
+ shr $8, %edx
+ movb %dl, (%rdi, %rcx)
+#ifndef AS_STRCPY
+ movb $0, (%rax)
+#endif
+ movb %sil, (%rdi)
+ ret
+
+
+
+
+
+ .p2align 4
+L(more16bytes):
+ pxor %xmm6, %xmm6
+ movdqu 16(%rsi), %xmm1
+ pxor %xmm7, %xmm7
+ pcmpeqb %xmm1, %xmm5
+ pmovmskb %xmm5, %edx
+ testl %edx, %edx
+ je L(more32bytes)
+ bsf %edx, %edx
+#ifdef AS_STRCPY
+ movdqu 1(%rsi, %rdx), %xmm1
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 1(%rdi, %rdx)
+#else
+ lea 16(%rdi, %rdx), %rax
+ movdqu 1(%rsi, %rdx), %xmm1
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, -15(%rax)
+#endif
+ ret
+
+ .p2align 4
+L(more32bytes):
+ movdqu 32(%rsi), %xmm2
+ movdqu 48(%rsi), %xmm3
+
+ pcmpeqb %xmm2, %xmm6
+ pcmpeqb %xmm3, %xmm7
+ pmovmskb %xmm7, %edx
+ shl $16, %edx
+ pmovmskb %xmm6, %ecx
+ or %ecx, %edx
+ je L(more64bytes)
+ bsf %edx, %edx
+#ifndef AS_STRCPY
+ lea 32(%rdi, %rdx), %rax
+#endif
+ movdqu 1(%rsi, %rdx), %xmm2
+ movdqu 17(%rsi, %rdx), %xmm3
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 16(%rdi)
+#ifdef AS_STRCPY
+ movdqu %xmm2, 1(%rdi, %rdx)
+ movdqu %xmm3, 17(%rdi, %rdx)
+#else
+ movdqu %xmm2, -31(%rax)
+ movdqu %xmm3, -15(%rax)
+#endif
+ ret
+
+ .p2align 4
+L(more64bytes):
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 16(%rdi)
+ movdqu %xmm2, 32(%rdi)
+ movdqu %xmm3, 48(%rdi)
+ movdqu 64(%rsi), %xmm0
+ movdqu 80(%rsi), %xmm1
+ movdqu 96(%rsi), %xmm2
+ movdqu 112(%rsi), %xmm3
+
+ pcmpeqb %xmm0, %xmm4
+ pcmpeqb %xmm1, %xmm5
+ pcmpeqb %xmm2, %xmm6
+ pcmpeqb %xmm3, %xmm7
+ pmovmskb %xmm4, %ecx
+ pmovmskb %xmm5, %edx
+ pmovmskb %xmm6, %r8d
+ pmovmskb %xmm7, %r9d
+ shl $16, %edx
+ or %ecx, %edx
+ shl $32, %r8
+ shl $48, %r9
+ or %r8, %rdx
+ or %r9, %rdx
+ test %rdx, %rdx
+ je L(prepare_loop)
+ bsf %rdx, %rdx
+#ifndef AS_STRCPY
+ lea 64(%rdi, %rdx), %rax
+#endif
+ movdqu 1(%rsi, %rdx), %xmm0
+ movdqu 17(%rsi, %rdx), %xmm1
+ movdqu 33(%rsi, %rdx), %xmm2
+ movdqu 49(%rsi, %rdx), %xmm3
+#ifdef AS_STRCPY
+ movdqu %xmm0, 1(%rdi, %rdx)
+ movdqu %xmm1, 17(%rdi, %rdx)
+ movdqu %xmm2, 33(%rdi, %rdx)
+ movdqu %xmm3, 49(%rdi, %rdx)
+#else
+ movdqu %xmm0, -63(%rax)
+ movdqu %xmm1, -47(%rax)
+ movdqu %xmm2, -31(%rax)
+ movdqu %xmm3, -15(%rax)
+#endif
+ ret
+
+
+ .p2align 4
+L(prepare_loop):
+ movdqu %xmm0, 64(%rdi)
+ movdqu %xmm1, 80(%rdi)
+ movdqu %xmm2, 96(%rdi)
+ movdqu %xmm3, 112(%rdi)
+
+ subq %rsi, %rdi
+ add $64, %rsi
+ andq $-64, %rsi
+ addq %rsi, %rdi
+ jmp L(loop_entry)
+
+#ifdef USE_AVX2
+ .p2align 4
+L(loop):
+ vmovdqu %ymm1, (%rdi)
+ vmovdqu %ymm3, 32(%rdi)
+L(loop_entry):
+ vmovdqa 96(%rsi), %ymm3
+ vmovdqa 64(%rsi), %ymm1
+ vpminub %ymm3, %ymm1, %ymm2
+ addq $64, %rsi
+ addq $64, %rdi
+ vpcmpeqb %ymm5, %ymm2, %ymm0
+ vpmovmskb %ymm0, %edx
+ test %edx, %edx
+ je L(loop)
+ salq $32, %rdx
+ vpcmpeqb %ymm5, %ymm1, %ymm4
+ vpmovmskb %ymm4, %ecx
+ or %rcx, %rdx
+ bsfq %rdx, %rdx
+#ifndef AS_STRCPY
+ lea (%rdi, %rdx), %rax
+#endif
+ vmovdqu -63(%rsi, %rdx), %ymm0
+ vmovdqu -31(%rsi, %rdx), %ymm2
+#ifdef AS_STRCPY
+ vmovdqu %ymm0, -63(%rdi, %rdx)
+ vmovdqu %ymm2, -31(%rdi, %rdx)
+#else
+ vmovdqu %ymm0, -63(%rax)
+ vmovdqu %ymm2, -31(%rax)
+#endif
+ vzeroupper
+ ret
+#else
+ .p2align 4
+L(loop):
+ movdqu %xmm1, (%rdi)
+ movdqu %xmm2, 16(%rdi)
+ movdqu %xmm3, 32(%rdi)
+ movdqu %xmm4, 48(%rdi)
+L(loop_entry):
+ movdqa 96(%rsi), %xmm3
+ movdqa 112(%rsi), %xmm4
+ movdqa %xmm3, %xmm0
+ movdqa 80(%rsi), %xmm2
+ pminub %xmm4, %xmm0
+ movdqa 64(%rsi), %xmm1
+ pminub %xmm2, %xmm0
+ pminub %xmm1, %xmm0
+ addq $64, %rsi
+ addq $64, %rdi
+ pcmpeqb %xmm5, %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ je L(loop)
+ salq $48, %rdx
+ pcmpeqb %xmm1, %xmm5
+ pcmpeqb %xmm2, %xmm6
+ pmovmskb %xmm5, %ecx
+#ifdef AS_STRCPY
+ pmovmskb %xmm6, %r8d
+ pcmpeqb %xmm3, %xmm7
+ pmovmskb %xmm7, %r9d
+ sal $16, %r8d
+ or %r8d, %ecx
+#else
+ pmovmskb %xmm6, %eax
+ pcmpeqb %xmm3, %xmm7
+ pmovmskb %xmm7, %r9d
+ sal $16, %eax
+ or %eax, %ecx
+#endif
+ salq $32, %r9
+ orq %rcx, %rdx
+ orq %r9, %rdx
+ bsfq %rdx, %rdx
+#ifndef AS_STRCPY
+ lea (%rdi, %rdx), %rax
+#endif
+ movdqu -63(%rsi, %rdx), %xmm0
+ movdqu -47(%rsi, %rdx), %xmm1
+ movdqu -31(%rsi, %rdx), %xmm2
+ movdqu -15(%rsi, %rdx), %xmm3
+#ifdef AS_STRCPY
+ movdqu %xmm0, -63(%rdi, %rdx)
+ movdqu %xmm1, -47(%rdi, %rdx)
+ movdqu %xmm2, -31(%rdi, %rdx)
+ movdqu %xmm3, -15(%rdi, %rdx)
+#else
+ movdqu %xmm0, -63(%rax)
+ movdqu %xmm1, -47(%rax)
+ movdqu %xmm2, -31(%rax)
+ movdqu %xmm3, -15(%rax)
+#endif
+ ret
+#endif
+
+ .p2align 4
+L(cross_page):
+ movq %rsi, %rcx
+ pxor %xmm0, %xmm0
+ and $15, %ecx
+ movq %rsi, %r9
+ movq %rdi, %r10
+ subq %rcx, %rsi
+ subq %rcx, %rdi
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ shr %cl, %edx
+ shl %cl, %edx
+ test %edx, %edx
+ jne L(less_32_cross)
+
+ addq $16, %rsi
+ addq $16, %rdi
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ jne L(less_32_cross)
+ movdqu %xmm1, (%rdi)
+
+ movdqu (%r9), %xmm0
+ movdqu %xmm0, (%r10)
+
+ mov $8, %rcx
+L(cross_loop):
+ addq $16, %rsi
+ addq $16, %rdi
+ pxor %xmm0, %xmm0
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ jne L(return_cross)
+ movdqu %xmm1, (%rdi)
+ sub $1, %rcx
+ ja L(cross_loop)
+
+ pxor %xmm5, %xmm5
+ pxor %xmm6, %xmm6
+ pxor %xmm7, %xmm7
+
+ lea -64(%rsi), %rdx
+ andq $-64, %rdx
+ addq %rdx, %rdi
+ subq %rsi, %rdi
+ movq %rdx, %rsi
+ jmp L(loop_entry)
+
+ .p2align 4
+L(return_cross):
+ bsf %edx, %edx
+#ifdef AS_STRCPY
+ movdqu -15(%rsi, %rdx), %xmm0
+ movdqu %xmm0, -15(%rdi, %rdx)
+#else
+ lea (%rdi, %rdx), %rax
+ movdqu -15(%rsi, %rdx), %xmm0
+ movdqu %xmm0, -15(%rax)
+#endif
+ ret
+
+ .p2align 4
+L(less_32_cross):
+ bsf %rdx, %rdx
+ lea (%rdi, %rdx), %rcx
+#ifndef AS_STRCPY
+ mov %rcx, %rax
+#endif
+ mov %r9, %rsi
+ mov %r10, %rdi
+ sub %rdi, %rcx
+ cmp $15, %ecx
+ jb L(less_16_cross)
+ movdqu (%rsi), %xmm0
+ movdqu -15(%rsi, %rcx), %xmm1
+ movdqu %xmm0, (%rdi)
+#ifdef AS_STRCPY
+ movdqu %xmm1, -15(%rdi, %rcx)
+#else
+ movdqu %xmm1, -15(%rax)
+#endif
+ ret
+
+L(less_16_cross):
+ cmp $7, %ecx
+ jb L(less_8_bytes_cross)
+ movq (%rsi), %rdx
+ jmp L(8bytes_from_cross)
+
+L(less_8_bytes_cross):
+ cmp $2, %ecx
+ jbe L(3_bytes_cross)
+ mov (%rsi), %edx
+ jmp L(4bytes_from_cross)
+
+L(3_bytes_cross):
+ jb L(1_2bytes_cross)
+ movzwl (%rsi), %edx
+ jmp L(_3_bytesb)
+
+L(1_2bytes_cross):
+ movb (%rsi), %dl
+ jmp L(0_2bytes_from_cross)
+
+ .p2align 4
+L(less_4_bytesb):
+ je L(_3_bytesb)
+L(0_2bytes_from_cross):
+ movb %dl, (%rdi)
+#ifdef AS_STRCPY
+ movb $0, (%rdi, %rcx)
+#else
+ movb $0, (%rax)
+#endif
+ ret
+
+ .p2align 4
+L(_3_bytesb):
+ movw %dx, (%rdi)
+ movb $0, 2(%rdi)
+ ret
+
+END(STPCPY)
diff --git a/sysdeps/x86_64/multiarch/stpncpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/stpncpy-sse2-unaligned.S
index 658520f..3f35068 100644
--- a/sysdeps/x86_64/multiarch/stpncpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/stpncpy-sse2-unaligned.S
@@ -1,4 +1,3 @@
#define USE_AS_STPCPY
-#define USE_AS_STRNCPY
#define STRCPY __stpncpy_sse2_unaligned
-#include "strcpy-sse2-unaligned.S"
+#include "strncpy-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/stpncpy.S b/sysdeps/x86_64/multiarch/stpncpy.S
index 2698ca6..159604a 100644
--- a/sysdeps/x86_64/multiarch/stpncpy.S
+++ b/sysdeps/x86_64/multiarch/stpncpy.S
@@ -1,8 +1,7 @@
/* Multiple versions of stpncpy
All versions must be listed in ifunc-impl-list.c. */
-#define STRCPY __stpncpy
+#define STRNCPY __stpncpy
#define USE_AS_STPCPY
-#define USE_AS_STRNCPY
-#include "strcpy.S"
+#include "strncpy.S"
weak_alias (__stpncpy, stpncpy)
diff --git a/sysdeps/x86_64/multiarch/strcat-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strcat-sse2-unaligned.S
index 81f1b40..1faa49d 100644
--- a/sysdeps/x86_64/multiarch/strcat-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/strcat-sse2-unaligned.S
@@ -275,5 +275,5 @@ L(StartStrcpyPart):
# define USE_AS_STRNCPY
# endif
-# include "strcpy-sse2-unaligned.S"
+# include "strncpy-sse2-unaligned.S"
#endif
diff --git a/sysdeps/x86_64/multiarch/strchrnul_avx2.S b/sysdeps/x86_64/multiarch/strchrnul_avx2.S
new file mode 100644
index 0000000..4dcb981
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchrnul_avx2.S
@@ -0,0 +1,3 @@
+#define USE_AS_STRCHRNUL
+#define __strchr_avx2 __strchrnul_avx2
+#include "strchr_avx2.S"
diff --git a/sysdeps/x86_64/multiarch/strcpy-avx2.S b/sysdeps/x86_64/multiarch/strcpy-avx2.S
new file mode 100644
index 0000000..a3133a4
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcpy-avx2.S
@@ -0,0 +1,4 @@
+#define USE_AVX2
+#define AS_STRCPY
+#define STPCPY __strcpy_avx2
+#include "stpcpy-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
index 8f03d1d..310e4fa 100644
--- a/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
@@ -1,1887 +1,3 @@
-/* strcpy with SSE2 and unaligned load
- Copyright (C) 2011-2015 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#if IS_IN (libc)
-
-# ifndef USE_AS_STRCAT
-# include <sysdep.h>
-
-# ifndef STRCPY
-# define STRCPY __strcpy_sse2_unaligned
-# endif
-
-# endif
-
-# define JMPTBL(I, B) I - B
-# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- lea TABLE(%rip), %r11; \
- movslq (%r11, INDEX, SCALE), %rcx; \
- lea (%r11, %rcx), %rcx; \
- jmp *%rcx
-
-# ifndef USE_AS_STRCAT
-
-.text
-ENTRY (STRCPY)
-# ifdef USE_AS_STRNCPY
- mov %rdx, %r8
- test %r8, %r8
- jz L(ExitZero)
-# endif
- mov %rsi, %rcx
-# ifndef USE_AS_STPCPY
- mov %rdi, %rax /* save result */
-# endif
-
-# endif
-
- and $63, %rcx
- cmp $32, %rcx
- jbe L(SourceStringAlignmentLess32)
-
- and $-16, %rsi
- and $15, %rcx
- pxor %xmm0, %xmm0
- pxor %xmm1, %xmm1
-
- pcmpeqb (%rsi), %xmm1
- pmovmskb %xmm1, %rdx
- shr %cl, %rdx
-
-# ifdef USE_AS_STRNCPY
-# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
- mov $16, %r10
- sub %rcx, %r10
- cmp %r10, %r8
-# else
- mov $17, %r10
- sub %rcx, %r10
- cmp %r10, %r8
-# endif
- jbe L(CopyFrom1To16BytesTailCase2OrCase3)
-# endif
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesTail)
-
- pcmpeqb 16(%rsi), %xmm0
- pmovmskb %xmm0, %rdx
-
-# ifdef USE_AS_STRNCPY
- add $16, %r10
- cmp %r10, %r8
- jbe L(CopyFrom1To32BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
- jnz L(CopyFrom1To32Bytes)
-
- movdqu (%rsi, %rcx), %xmm1 /* copy 16 bytes */
- movdqu %xmm1, (%rdi)
-
-/* If source address alignment != destination address alignment */
- .p2align 4
-L(Unalign16Both):
- sub %rcx, %rdi
-# ifdef USE_AS_STRNCPY
- add %rcx, %r8
-# endif
- mov $16, %rcx
- movdqa (%rsi, %rcx), %xmm1
- movaps 16(%rsi, %rcx), %xmm2
- movdqu %xmm1, (%rdi, %rcx)
- pcmpeqb %xmm2, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $48, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm2)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movaps 16(%rsi, %rcx), %xmm3
- movdqu %xmm2, (%rdi, %rcx)
- pcmpeqb %xmm3, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm3)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movaps 16(%rsi, %rcx), %xmm4
- movdqu %xmm3, (%rdi, %rcx)
- pcmpeqb %xmm4, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm4)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movaps 16(%rsi, %rcx), %xmm1
- movdqu %xmm4, (%rdi, %rcx)
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm1)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movaps 16(%rsi, %rcx), %xmm2
- movdqu %xmm1, (%rdi, %rcx)
- pcmpeqb %xmm2, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm2)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movaps 16(%rsi, %rcx), %xmm3
- movdqu %xmm2, (%rdi, %rcx)
- pcmpeqb %xmm3, %xmm0
- pmovmskb %xmm0, %rdx
- add $16, %rcx
-# ifdef USE_AS_STRNCPY
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
-# endif
- test %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm3)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- movdqu %xmm3, (%rdi, %rcx)
- mov %rsi, %rdx
- lea 16(%rsi, %rcx), %rsi
- and $-0x40, %rsi
- sub %rsi, %rdx
- sub %rdx, %rdi
-# ifdef USE_AS_STRNCPY
- lea 128(%r8, %rdx), %r8
-# endif
-L(Unaligned64Loop):
- movaps (%rsi), %xmm2
- movaps %xmm2, %xmm4
- movaps 16(%rsi), %xmm5
- movaps 32(%rsi), %xmm3
- movaps %xmm3, %xmm6
- movaps 48(%rsi), %xmm7
- pminub %xmm5, %xmm2
- pminub %xmm7, %xmm3
- pminub %xmm2, %xmm3
- pcmpeqb %xmm0, %xmm3
- pmovmskb %xmm3, %rdx
-# ifdef USE_AS_STRNCPY
- sub $64, %r8
- jbe L(UnalignedLeaveCase2OrCase3)
-# endif
- test %rdx, %rdx
- jnz L(Unaligned64Leave)
-
-L(Unaligned64Loop_start):
- add $64, %rdi
- add $64, %rsi
- movdqu %xmm4, -64(%rdi)
- movaps (%rsi), %xmm2
- movdqa %xmm2, %xmm4
- movdqu %xmm5, -48(%rdi)
- movaps 16(%rsi), %xmm5
- pminub %xmm5, %xmm2
- movaps 32(%rsi), %xmm3
- movdqu %xmm6, -32(%rdi)
- movaps %xmm3, %xmm6
- movdqu %xmm7, -16(%rdi)
- movaps 48(%rsi), %xmm7
- pminub %xmm7, %xmm3
- pminub %xmm2, %xmm3
- pcmpeqb %xmm0, %xmm3
- pmovmskb %xmm3, %rdx
-# ifdef USE_AS_STRNCPY
- sub $64, %r8
- jbe L(UnalignedLeaveCase2OrCase3)
-# endif
- test %rdx, %rdx
- jz L(Unaligned64Loop_start)
-
-L(Unaligned64Leave):
- pxor %xmm1, %xmm1
-
- pcmpeqb %xmm4, %xmm0
- pcmpeqb %xmm5, %xmm1
- pmovmskb %xmm0, %rdx
- pmovmskb %xmm1, %rcx
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesUnaligned_0)
- test %rcx, %rcx
- jnz L(CopyFrom1To16BytesUnaligned_16)
-
- pcmpeqb %xmm6, %xmm0
- pcmpeqb %xmm7, %xmm1
- pmovmskb %xmm0, %rdx
- pmovmskb %xmm1, %rcx
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesUnaligned_32)
-
- bsf %rcx, %rdx
- movdqu %xmm4, (%rdi)
- movdqu %xmm5, 16(%rdi)
- movdqu %xmm6, 32(%rdi)
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
-# ifdef USE_AS_STPCPY
- lea 48(%rdi, %rdx), %rax
-# endif
- movdqu %xmm7, 48(%rdi)
- add $15, %r8
- sub %rdx, %r8
- lea 49(%rdi, %rdx), %rdi
- jmp L(StrncpyFillTailWithZero)
-# else
- add $48, %rsi
- add $48, %rdi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-# endif
-
-/* If source address alignment == destination address alignment */
-
-L(SourceStringAlignmentLess32):
- pxor %xmm0, %xmm0
- movdqu (%rsi), %xmm1
- movdqu 16(%rsi), %xmm2
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %rdx
-
-# ifdef USE_AS_STRNCPY
-# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
- cmp $16, %r8
-# else
- cmp $17, %r8
-# endif
- jbe L(CopyFrom1To16BytesTail1Case2OrCase3)
-# endif
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesTail1)
-
- pcmpeqb %xmm2, %xmm0
- movdqu %xmm1, (%rdi)
- pmovmskb %xmm0, %rdx
-
-# ifdef USE_AS_STRNCPY
-# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
- cmp $32, %r8
-# else
- cmp $33, %r8
-# endif
- jbe L(CopyFrom1To32Bytes1Case2OrCase3)
-# endif
- test %rdx, %rdx
- jnz L(CopyFrom1To32Bytes1)
-
- and $-16, %rsi
- and $15, %rcx
- jmp L(Unalign16Both)
-
-/*------End of main part with loops---------------------*/
-
-/* Case1 */
-
-# if (!defined USE_AS_STRNCPY) || (defined USE_AS_STRCAT)
- .p2align 4
-L(CopyFrom1To16Bytes):
- add %rcx, %rdi
- add %rcx, %rsi
- bsf %rdx, %rdx
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-# endif
- .p2align 4
-L(CopyFrom1To16BytesTail):
- add %rcx, %rsi
- bsf %rdx, %rdx
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-
- .p2align 4
-L(CopyFrom1To32Bytes1):
- add $16, %rsi
- add $16, %rdi
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $16, %r8
-# endif
-L(CopyFrom1To16BytesTail1):
- bsf %rdx, %rdx
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-
- .p2align 4
-L(CopyFrom1To32Bytes):
- bsf %rdx, %rdx
- add %rcx, %rsi
- add $16, %rdx
- sub %rcx, %rdx
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-
- .p2align 4
-L(CopyFrom1To16BytesUnaligned_0):
- bsf %rdx, %rdx
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
-# ifdef USE_AS_STPCPY
- lea (%rdi, %rdx), %rax
-# endif
- movdqu %xmm4, (%rdi)
- add $63, %r8
- sub %rdx, %r8
- lea 1(%rdi, %rdx), %rdi
- jmp L(StrncpyFillTailWithZero)
-# else
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-# endif
-
- .p2align 4
-L(CopyFrom1To16BytesUnaligned_16):
- bsf %rcx, %rdx
- movdqu %xmm4, (%rdi)
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
-# ifdef USE_AS_STPCPY
- lea 16(%rdi, %rdx), %rax
-# endif
- movdqu %xmm5, 16(%rdi)
- add $47, %r8
- sub %rdx, %r8
- lea 17(%rdi, %rdx), %rdi
- jmp L(StrncpyFillTailWithZero)
-# else
- add $16, %rsi
- add $16, %rdi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-# endif
-
- .p2align 4
-L(CopyFrom1To16BytesUnaligned_32):
- bsf %rdx, %rdx
- movdqu %xmm4, (%rdi)
- movdqu %xmm5, 16(%rdi)
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
-# ifdef USE_AS_STPCPY
- lea 32(%rdi, %rdx), %rax
-# endif
- movdqu %xmm6, 32(%rdi)
- add $31, %r8
- sub %rdx, %r8
- lea 33(%rdi, %rdx), %rdi
- jmp L(StrncpyFillTailWithZero)
-# else
- add $32, %rsi
- add $32, %rdi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-# endif
-
-# ifdef USE_AS_STRNCPY
-# ifndef USE_AS_STRCAT
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm6):
- movdqu %xmm6, (%rdi, %rcx)
- jmp L(CopyFrom1To16BytesXmmExit)
-
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm5):
- movdqu %xmm5, (%rdi, %rcx)
- jmp L(CopyFrom1To16BytesXmmExit)
-
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm4):
- movdqu %xmm4, (%rdi, %rcx)
- jmp L(CopyFrom1To16BytesXmmExit)
-
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm3):
- movdqu %xmm3, (%rdi, %rcx)
- jmp L(CopyFrom1To16BytesXmmExit)
-
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm1):
- movdqu %xmm1, (%rdi, %rcx)
- jmp L(CopyFrom1To16BytesXmmExit)
-# endif
-
- .p2align 4
-L(CopyFrom1To16BytesExit):
- BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
-
-/* Case2 */
-
- .p2align 4
-L(CopyFrom1To16BytesCase2):
- add $16, %r8
- add %rcx, %rdi
- add %rcx, %rsi
- bsf %rdx, %rdx
- cmp %r8, %rdx
- jb L(CopyFrom1To16BytesExit)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
- .p2align 4
-L(CopyFrom1To32BytesCase2):
- add %rcx, %rsi
- bsf %rdx, %rdx
- add $16, %rdx
- sub %rcx, %rdx
- cmp %r8, %rdx
- jb L(CopyFrom1To16BytesExit)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
-L(CopyFrom1To16BytesTailCase2):
- add %rcx, %rsi
- bsf %rdx, %rdx
- cmp %r8, %rdx
- jb L(CopyFrom1To16BytesExit)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
-L(CopyFrom1To16BytesTail1Case2):
- bsf %rdx, %rdx
- cmp %r8, %rdx
- jb L(CopyFrom1To16BytesExit)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
-/* Case2 or Case3, Case3 */
-
- .p2align 4
-L(CopyFrom1To16BytesCase2OrCase3):
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesCase2)
-L(CopyFrom1To16BytesCase3):
- add $16, %r8
- add %rcx, %rdi
- add %rcx, %rsi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
- .p2align 4
-L(CopyFrom1To32BytesCase2OrCase3):
- test %rdx, %rdx
- jnz L(CopyFrom1To32BytesCase2)
- add %rcx, %rsi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
- .p2align 4
-L(CopyFrom1To16BytesTailCase2OrCase3):
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesTailCase2)
- add %rcx, %rsi
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
- .p2align 4
-L(CopyFrom1To32Bytes1Case2OrCase3):
- add $16, %rdi
- add $16, %rsi
- sub $16, %r8
-L(CopyFrom1To16BytesTail1Case2OrCase3):
- test %rdx, %rdx
- jnz L(CopyFrom1To16BytesTail1Case2)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
-# endif
-
-/*------------End labels regarding with copying 1-16 bytes--and 1-32 bytes----*/
-
- .p2align 4
-L(Exit1):
- mov %dh, (%rdi)
-# ifdef USE_AS_STPCPY
- lea (%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $1, %r8
- lea 1(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit2):
- mov (%rsi), %dx
- mov %dx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 1(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $2, %r8
- lea 2(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit3):
- mov (%rsi), %cx
- mov %cx, (%rdi)
- mov %dh, 2(%rdi)
-# ifdef USE_AS_STPCPY
- lea 2(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $3, %r8
- lea 3(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit4):
- mov (%rsi), %edx
- mov %edx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 3(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $4, %r8
- lea 4(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit5):
- mov (%rsi), %ecx
- mov %dh, 4(%rdi)
- mov %ecx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 4(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $5, %r8
- lea 5(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit6):
- mov (%rsi), %ecx
- mov 4(%rsi), %dx
- mov %ecx, (%rdi)
- mov %dx, 4(%rdi)
-# ifdef USE_AS_STPCPY
- lea 5(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $6, %r8
- lea 6(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit7):
- mov (%rsi), %ecx
- mov 3(%rsi), %edx
- mov %ecx, (%rdi)
- mov %edx, 3(%rdi)
-# ifdef USE_AS_STPCPY
- lea 6(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $7, %r8
- lea 7(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit8):
- mov (%rsi), %rdx
- mov %rdx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 7(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $8, %r8
- lea 8(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit9):
- mov (%rsi), %rcx
- mov %dh, 8(%rdi)
- mov %rcx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 8(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $9, %r8
- lea 9(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit10):
- mov (%rsi), %rcx
- mov 8(%rsi), %dx
- mov %rcx, (%rdi)
- mov %dx, 8(%rdi)
-# ifdef USE_AS_STPCPY
- lea 9(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $10, %r8
- lea 10(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit11):
- mov (%rsi), %rcx
- mov 7(%rsi), %edx
- mov %rcx, (%rdi)
- mov %edx, 7(%rdi)
-# ifdef USE_AS_STPCPY
- lea 10(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $11, %r8
- lea 11(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit12):
- mov (%rsi), %rcx
- mov 8(%rsi), %edx
- mov %rcx, (%rdi)
- mov %edx, 8(%rdi)
-# ifdef USE_AS_STPCPY
- lea 11(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $12, %r8
- lea 12(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit13):
- mov (%rsi), %rcx
- mov 5(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 5(%rdi)
-# ifdef USE_AS_STPCPY
- lea 12(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $13, %r8
- lea 13(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit14):
- mov (%rsi), %rcx
- mov 6(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 6(%rdi)
-# ifdef USE_AS_STPCPY
- lea 13(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $14, %r8
- lea 14(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit15):
- mov (%rsi), %rcx
- mov 7(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 7(%rdi)
-# ifdef USE_AS_STPCPY
- lea 14(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $15, %r8
- lea 15(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit16):
- movdqu (%rsi), %xmm0
- movdqu %xmm0, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 15(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $16, %r8
- lea 16(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit17):
- movdqu (%rsi), %xmm0
- movdqu %xmm0, (%rdi)
- mov %dh, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 16(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $17, %r8
- lea 17(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit18):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %cx
- movdqu %xmm0, (%rdi)
- mov %cx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 17(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $18, %r8
- lea 18(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit19):
- movdqu (%rsi), %xmm0
- mov 15(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %ecx, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 18(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $19, %r8
- lea 19(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit20):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %ecx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 19(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $20, %r8
- lea 20(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit21):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %ecx, 16(%rdi)
- mov %dh, 20(%rdi)
-# ifdef USE_AS_STPCPY
- lea 20(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $21, %r8
- lea 21(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit22):
- movdqu (%rsi), %xmm0
- mov 14(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 14(%rdi)
-# ifdef USE_AS_STPCPY
- lea 21(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $22, %r8
- lea 22(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit23):
- movdqu (%rsi), %xmm0
- mov 15(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 22(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $23, %r8
- lea 23(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit24):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 23(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $24, %r8
- lea 24(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit25):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 16(%rdi)
- mov %dh, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 24(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $25, %r8
- lea 25(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit26):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 24(%rsi), %cx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %cx, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 25(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $26, %r8
- lea 26(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit27):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 23(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %ecx, 23(%rdi)
-# ifdef USE_AS_STPCPY
- lea 26(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $27, %r8
- lea 27(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit28):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 24(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %ecx, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 27(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $28, %r8
- lea 28(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit29):
- movdqu (%rsi), %xmm0
- movdqu 13(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 13(%rdi)
-# ifdef USE_AS_STPCPY
- lea 28(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $29, %r8
- lea 29(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit30):
- movdqu (%rsi), %xmm0
- movdqu 14(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 14(%rdi)
-# ifdef USE_AS_STPCPY
- lea 29(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $30, %r8
- lea 30(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit31):
- movdqu (%rsi), %xmm0
- movdqu 15(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 30(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $31, %r8
- lea 31(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
- .p2align 4
-L(Exit32):
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 31(%rdi), %rax
-# endif
-# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
- sub $32, %r8
- lea 32(%rdi), %rdi
- jnz L(StrncpyFillTailWithZero)
-# endif
- ret
-
-# ifdef USE_AS_STRNCPY
-
- .p2align 4
-L(StrncpyExit0):
-# ifdef USE_AS_STPCPY
- mov %rdi, %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, (%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit1):
- mov (%rsi), %dl
- mov %dl, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 1(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 1(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit2):
- mov (%rsi), %dx
- mov %dx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 2(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 2(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit3):
- mov (%rsi), %cx
- mov 2(%rsi), %dl
- mov %cx, (%rdi)
- mov %dl, 2(%rdi)
-# ifdef USE_AS_STPCPY
- lea 3(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 3(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit4):
- mov (%rsi), %edx
- mov %edx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 4(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 4(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit5):
- mov (%rsi), %ecx
- mov 4(%rsi), %dl
- mov %ecx, (%rdi)
- mov %dl, 4(%rdi)
-# ifdef USE_AS_STPCPY
- lea 5(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 5(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit6):
- mov (%rsi), %ecx
- mov 4(%rsi), %dx
- mov %ecx, (%rdi)
- mov %dx, 4(%rdi)
-# ifdef USE_AS_STPCPY
- lea 6(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 6(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit7):
- mov (%rsi), %ecx
- mov 3(%rsi), %edx
- mov %ecx, (%rdi)
- mov %edx, 3(%rdi)
-# ifdef USE_AS_STPCPY
- lea 7(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 7(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit8):
- mov (%rsi), %rdx
- mov %rdx, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 8(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 8(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit9):
- mov (%rsi), %rcx
- mov 8(%rsi), %dl
- mov %rcx, (%rdi)
- mov %dl, 8(%rdi)
-# ifdef USE_AS_STPCPY
- lea 9(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 9(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit10):
- mov (%rsi), %rcx
- mov 8(%rsi), %dx
- mov %rcx, (%rdi)
- mov %dx, 8(%rdi)
-# ifdef USE_AS_STPCPY
- lea 10(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 10(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit11):
- mov (%rsi), %rcx
- mov 7(%rsi), %edx
- mov %rcx, (%rdi)
- mov %edx, 7(%rdi)
-# ifdef USE_AS_STPCPY
- lea 11(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 11(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit12):
- mov (%rsi), %rcx
- mov 8(%rsi), %edx
- mov %rcx, (%rdi)
- mov %edx, 8(%rdi)
-# ifdef USE_AS_STPCPY
- lea 12(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 12(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit13):
- mov (%rsi), %rcx
- mov 5(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 5(%rdi)
-# ifdef USE_AS_STPCPY
- lea 13(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 13(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit14):
- mov (%rsi), %rcx
- mov 6(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 6(%rdi)
-# ifdef USE_AS_STPCPY
- lea 14(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 14(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit15):
- mov (%rsi), %rcx
- mov 7(%rsi), %rdx
- mov %rcx, (%rdi)
- mov %rdx, 7(%rdi)
-# ifdef USE_AS_STPCPY
- lea 15(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 15(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit16):
- movdqu (%rsi), %xmm0
- movdqu %xmm0, (%rdi)
-# ifdef USE_AS_STPCPY
- lea 16(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 16(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit17):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %cl
- movdqu %xmm0, (%rdi)
- mov %cl, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 17(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 17(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit18):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %cx
- movdqu %xmm0, (%rdi)
- mov %cx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 18(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 18(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit19):
- movdqu (%rsi), %xmm0
- mov 15(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %ecx, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 19(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 19(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit20):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %ecx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 20(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 20(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit21):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %ecx
- mov 20(%rsi), %dl
- movdqu %xmm0, (%rdi)
- mov %ecx, 16(%rdi)
- mov %dl, 20(%rdi)
-# ifdef USE_AS_STPCPY
- lea 21(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 21(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit22):
- movdqu (%rsi), %xmm0
- mov 14(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 14(%rdi)
-# ifdef USE_AS_STPCPY
- lea 22(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 22(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit23):
- movdqu (%rsi), %xmm0
- mov 15(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 23(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 23(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit24):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rcx
- movdqu %xmm0, (%rdi)
- mov %rcx, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 24(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 24(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit25):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 24(%rsi), %cl
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %cl, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 25(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 25(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit26):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 24(%rsi), %cx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %cx, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 26(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 26(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit27):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 23(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %ecx, 23(%rdi)
-# ifdef USE_AS_STPCPY
- lea 27(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 27(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit28):
- movdqu (%rsi), %xmm0
- mov 16(%rsi), %rdx
- mov 24(%rsi), %ecx
- movdqu %xmm0, (%rdi)
- mov %rdx, 16(%rdi)
- mov %ecx, 24(%rdi)
-# ifdef USE_AS_STPCPY
- lea 28(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 28(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit29):
- movdqu (%rsi), %xmm0
- movdqu 13(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 13(%rdi)
-# ifdef USE_AS_STPCPY
- lea 29(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 29(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit30):
- movdqu (%rsi), %xmm0
- movdqu 14(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 14(%rdi)
-# ifdef USE_AS_STPCPY
- lea 30(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 30(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit31):
- movdqu (%rsi), %xmm0
- movdqu 15(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 15(%rdi)
-# ifdef USE_AS_STPCPY
- lea 31(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 31(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit32):
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm2
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 16(%rdi)
-# ifdef USE_AS_STPCPY
- lea 32(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 32(%rdi)
-# endif
- ret
-
- .p2align 4
-L(StrncpyExit33):
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm2
- mov 32(%rsi), %cl
- movdqu %xmm0, (%rdi)
- movdqu %xmm2, 16(%rdi)
- mov %cl, 32(%rdi)
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 33(%rdi)
-# endif
- ret
-
-# ifndef USE_AS_STRCAT
-
- .p2align 4
-L(Fill0):
- ret
-
- .p2align 4
-L(Fill1):
- mov %dl, (%rdi)
- ret
-
- .p2align 4
-L(Fill2):
- mov %dx, (%rdi)
- ret
-
- .p2align 4
-L(Fill3):
- mov %edx, -1(%rdi)
- ret
-
- .p2align 4
-L(Fill4):
- mov %edx, (%rdi)
- ret
-
- .p2align 4
-L(Fill5):
- mov %edx, (%rdi)
- mov %dl, 4(%rdi)
- ret
-
- .p2align 4
-L(Fill6):
- mov %edx, (%rdi)
- mov %dx, 4(%rdi)
- ret
-
- .p2align 4
-L(Fill7):
- mov %rdx, -1(%rdi)
- ret
-
- .p2align 4
-L(Fill8):
- mov %rdx, (%rdi)
- ret
-
- .p2align 4
-L(Fill9):
- mov %rdx, (%rdi)
- mov %dl, 8(%rdi)
- ret
-
- .p2align 4
-L(Fill10):
- mov %rdx, (%rdi)
- mov %dx, 8(%rdi)
- ret
-
- .p2align 4
-L(Fill11):
- mov %rdx, (%rdi)
- mov %edx, 7(%rdi)
- ret
-
- .p2align 4
-L(Fill12):
- mov %rdx, (%rdi)
- mov %edx, 8(%rdi)
- ret
-
- .p2align 4
-L(Fill13):
- mov %rdx, (%rdi)
- mov %rdx, 5(%rdi)
- ret
-
- .p2align 4
-L(Fill14):
- mov %rdx, (%rdi)
- mov %rdx, 6(%rdi)
- ret
-
- .p2align 4
-L(Fill15):
- movdqu %xmm0, -1(%rdi)
- ret
-
- .p2align 4
-L(Fill16):
- movdqu %xmm0, (%rdi)
- ret
-
- .p2align 4
-L(CopyFrom1To16BytesUnalignedXmm2):
- movdqu %xmm2, (%rdi, %rcx)
-
- .p2align 4
-L(CopyFrom1To16BytesXmmExit):
- bsf %rdx, %rdx
- add $15, %r8
- add %rcx, %rdi
-# ifdef USE_AS_STPCPY
- lea (%rdi, %rdx), %rax
-# endif
- sub %rdx, %r8
- lea 1(%rdi, %rdx), %rdi
-
- .p2align 4
-L(StrncpyFillTailWithZero):
- pxor %xmm0, %xmm0
- xor %rdx, %rdx
- sub $16, %r8
- jbe L(StrncpyFillExit)
-
- movdqu %xmm0, (%rdi)
- add $16, %rdi
-
- mov %rdi, %rsi
- and $0xf, %rsi
- sub %rsi, %rdi
- add %rsi, %r8
- sub $64, %r8
- jb L(StrncpyFillLess64)
-
-L(StrncpyFillLoopMovdqa):
- movdqa %xmm0, (%rdi)
- movdqa %xmm0, 16(%rdi)
- movdqa %xmm0, 32(%rdi)
- movdqa %xmm0, 48(%rdi)
- add $64, %rdi
- sub $64, %r8
- jae L(StrncpyFillLoopMovdqa)
-
-L(StrncpyFillLess64):
- add $32, %r8
- jl L(StrncpyFillLess32)
- movdqa %xmm0, (%rdi)
- movdqa %xmm0, 16(%rdi)
- add $32, %rdi
- sub $16, %r8
- jl L(StrncpyFillExit)
- movdqa %xmm0, (%rdi)
- add $16, %rdi
- BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
-
-L(StrncpyFillLess32):
- add $16, %r8
- jl L(StrncpyFillExit)
- movdqa %xmm0, (%rdi)
- add $16, %rdi
- BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
-
-L(StrncpyFillExit):
- add $16, %r8
- BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
-
-/* end of ifndef USE_AS_STRCAT */
-# endif
-
- .p2align 4
-L(UnalignedLeaveCase2OrCase3):
- test %rdx, %rdx
- jnz L(Unaligned64LeaveCase2)
-L(Unaligned64LeaveCase3):
- lea 64(%r8), %rcx
- and $-16, %rcx
- add $48, %r8
- jl L(CopyFrom1To16BytesCase3)
- movdqu %xmm4, (%rdi)
- sub $16, %r8
- jb L(CopyFrom1To16BytesCase3)
- movdqu %xmm5, 16(%rdi)
- sub $16, %r8
- jb L(CopyFrom1To16BytesCase3)
- movdqu %xmm6, 32(%rdi)
- sub $16, %r8
- jb L(CopyFrom1To16BytesCase3)
- movdqu %xmm7, 48(%rdi)
-# ifdef USE_AS_STPCPY
- lea 64(%rdi), %rax
-# endif
-# ifdef USE_AS_STRCAT
- xor %ch, %ch
- movb %ch, 64(%rdi)
-# endif
- ret
-
- .p2align 4
-L(Unaligned64LeaveCase2):
- xor %rcx, %rcx
- pcmpeqb %xmm4, %xmm0
- pmovmskb %xmm0, %rdx
- add $48, %r8
- jle L(CopyFrom1To16BytesCase2OrCase3)
- test %rdx, %rdx
-# ifndef USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm4)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
- pcmpeqb %xmm5, %xmm0
- pmovmskb %xmm0, %rdx
- movdqu %xmm4, (%rdi)
- add $16, %rcx
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
- test %rdx, %rdx
-# ifndef USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm5)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- pcmpeqb %xmm6, %xmm0
- pmovmskb %xmm0, %rdx
- movdqu %xmm5, 16(%rdi)
- add $16, %rcx
- sub $16, %r8
- jbe L(CopyFrom1To16BytesCase2OrCase3)
- test %rdx, %rdx
-# ifndef USE_AS_STRCAT
- jnz L(CopyFrom1To16BytesUnalignedXmm6)
-# else
- jnz L(CopyFrom1To16Bytes)
-# endif
-
- pcmpeqb %xmm7, %xmm0
- pmovmskb %xmm0, %rdx
- movdqu %xmm6, 32(%rdi)
- lea 16(%rdi, %rcx), %rdi
- lea 16(%rsi, %rcx), %rsi
- bsf %rdx, %rdx
- cmp %r8, %rdx
- jb L(CopyFrom1To16BytesExit)
- BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
-
- .p2align 4
-L(ExitZero):
-# ifndef USE_AS_STRCAT
- mov %rdi, %rax
-# endif
- ret
-
-# endif
-
-# ifndef USE_AS_STRCAT
-END (STRCPY)
-# else
-END (STRCAT)
-# endif
- .p2align 4
- .section .rodata
-L(ExitTable):
- .int JMPTBL(L(Exit1), L(ExitTable))
- .int JMPTBL(L(Exit2), L(ExitTable))
- .int JMPTBL(L(Exit3), L(ExitTable))
- .int JMPTBL(L(Exit4), L(ExitTable))
- .int JMPTBL(L(Exit5), L(ExitTable))
- .int JMPTBL(L(Exit6), L(ExitTable))
- .int JMPTBL(L(Exit7), L(ExitTable))
- .int JMPTBL(L(Exit8), L(ExitTable))
- .int JMPTBL(L(Exit9), L(ExitTable))
- .int JMPTBL(L(Exit10), L(ExitTable))
- .int JMPTBL(L(Exit11), L(ExitTable))
- .int JMPTBL(L(Exit12), L(ExitTable))
- .int JMPTBL(L(Exit13), L(ExitTable))
- .int JMPTBL(L(Exit14), L(ExitTable))
- .int JMPTBL(L(Exit15), L(ExitTable))
- .int JMPTBL(L(Exit16), L(ExitTable))
- .int JMPTBL(L(Exit17), L(ExitTable))
- .int JMPTBL(L(Exit18), L(ExitTable))
- .int JMPTBL(L(Exit19), L(ExitTable))
- .int JMPTBL(L(Exit20), L(ExitTable))
- .int JMPTBL(L(Exit21), L(ExitTable))
- .int JMPTBL(L(Exit22), L(ExitTable))
- .int JMPTBL(L(Exit23), L(ExitTable))
- .int JMPTBL(L(Exit24), L(ExitTable))
- .int JMPTBL(L(Exit25), L(ExitTable))
- .int JMPTBL(L(Exit26), L(ExitTable))
- .int JMPTBL(L(Exit27), L(ExitTable))
- .int JMPTBL(L(Exit28), L(ExitTable))
- .int JMPTBL(L(Exit29), L(ExitTable))
- .int JMPTBL(L(Exit30), L(ExitTable))
- .int JMPTBL(L(Exit31), L(ExitTable))
- .int JMPTBL(L(Exit32), L(ExitTable))
-# ifdef USE_AS_STRNCPY
-L(ExitStrncpyTable):
- .int JMPTBL(L(StrncpyExit0), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit1), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit2), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit3), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit4), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit5), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit6), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit7), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit8), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit9), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit10), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit11), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit12), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit13), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit14), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit15), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit16), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit17), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit18), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit19), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit20), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit21), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit22), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit23), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit24), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit25), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit26), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit27), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit28), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit29), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit30), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit31), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit32), L(ExitStrncpyTable))
- .int JMPTBL(L(StrncpyExit33), L(ExitStrncpyTable))
-# ifndef USE_AS_STRCAT
- .p2align 4
-L(FillTable):
- .int JMPTBL(L(Fill0), L(FillTable))
- .int JMPTBL(L(Fill1), L(FillTable))
- .int JMPTBL(L(Fill2), L(FillTable))
- .int JMPTBL(L(Fill3), L(FillTable))
- .int JMPTBL(L(Fill4), L(FillTable))
- .int JMPTBL(L(Fill5), L(FillTable))
- .int JMPTBL(L(Fill6), L(FillTable))
- .int JMPTBL(L(Fill7), L(FillTable))
- .int JMPTBL(L(Fill8), L(FillTable))
- .int JMPTBL(L(Fill9), L(FillTable))
- .int JMPTBL(L(Fill10), L(FillTable))
- .int JMPTBL(L(Fill11), L(FillTable))
- .int JMPTBL(L(Fill12), L(FillTable))
- .int JMPTBL(L(Fill13), L(FillTable))
- .int JMPTBL(L(Fill14), L(FillTable))
- .int JMPTBL(L(Fill15), L(FillTable))
- .int JMPTBL(L(Fill16), L(FillTable))
-# endif
-# endif
-#endif
+#define AS_STRCPY
+#define STPCPY __strcpy_sse2_unaligned
+#include "stpcpy-sse2-unaligned.S"
diff --git a/sysdeps/x86_64/multiarch/strcpy.S b/sysdeps/x86_64/multiarch/strcpy.S
index 9464ee8..92be04c 100644
--- a/sysdeps/x86_64/multiarch/strcpy.S
+++ b/sysdeps/x86_64/multiarch/strcpy.S
@@ -28,31 +28,18 @@
#endif
#ifdef USE_AS_STPCPY
-# ifdef USE_AS_STRNCPY
-# define STRCPY_SSSE3 __stpncpy_ssse3
-# define STRCPY_SSE2 __stpncpy_sse2
-# define STRCPY_SSE2_UNALIGNED __stpncpy_sse2_unaligned
-# define __GI_STRCPY __GI_stpncpy
-# define __GI___STRCPY __GI___stpncpy
-# else
# define STRCPY_SSSE3 __stpcpy_ssse3
# define STRCPY_SSE2 __stpcpy_sse2
+# define STRCPY_AVX2 __stpcpy_avx2
# define STRCPY_SSE2_UNALIGNED __stpcpy_sse2_unaligned
# define __GI_STRCPY __GI_stpcpy
# define __GI___STRCPY __GI___stpcpy
-# endif
#else
-# ifdef USE_AS_STRNCPY
-# define STRCPY_SSSE3 __strncpy_ssse3
-# define STRCPY_SSE2 __strncpy_sse2
-# define STRCPY_SSE2_UNALIGNED __strncpy_sse2_unaligned
-# define __GI_STRCPY __GI_strncpy
-# else
# define STRCPY_SSSE3 __strcpy_ssse3
+# define STRCPY_AVX2 __strcpy_avx2
# define STRCPY_SSE2 __strcpy_sse2
# define STRCPY_SSE2_UNALIGNED __strcpy_sse2_unaligned
# define __GI_STRCPY __GI_strcpy
-# endif
#endif
@@ -64,7 +51,10 @@ ENTRY(STRCPY)
cmpl $0, __cpu_features+KIND_OFFSET(%rip)
jne 1f
call __init_cpu_features
-1: leaq STRCPY_SSE2_UNALIGNED(%rip), %rax
+1: leaq STRCPY_AVX2(%rip), %rax
+ testl $bit_AVX_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_AVX_Fast_Unaligned_Load(%rip)
+ jnz 2f
+ leaq STRCPY_SSE2_UNALIGNED(%rip), %rax
testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
jnz 2f
leaq STRCPY_SSE2(%rip), %rax
diff --git a/sysdeps/x86_64/multiarch/strncpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strncpy-sse2-unaligned.S
index fcc23a7..e4c98e7 100644
--- a/sysdeps/x86_64/multiarch/strncpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/strncpy-sse2-unaligned.S
@@ -1,3 +1,1888 @@
-#define USE_AS_STRNCPY
-#define STRCPY __strncpy_sse2_unaligned
-#include "strcpy-sse2-unaligned.S"
+/* strcpy with SSE2 and unaligned load
+ Copyright (C) 2011-2015 Free Software Foundation, Inc.
+ Contributed by Intel Corporation.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+
+# ifndef USE_AS_STRCAT
+# include <sysdep.h>
+
+# ifndef STRCPY
+# define STRCPY __strncpy_sse2_unaligned
+# endif
+
+# define USE_AS_STRNCPY
+# endif
+
+# define JMPTBL(I, B) I - B
+# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
+ lea TABLE(%rip), %r11; \
+ movslq (%r11, INDEX, SCALE), %rcx; \
+ lea (%r11, %rcx), %rcx; \
+ jmp *%rcx
+
+# ifndef USE_AS_STRCAT
+
+.text
+ENTRY (STRCPY)
+# ifdef USE_AS_STRNCPY
+ mov %rdx, %r8
+ test %r8, %r8
+ jz L(ExitZero)
+# endif
+ mov %rsi, %rcx
+# ifndef USE_AS_STPCPY
+ mov %rdi, %rax /* save result */
+# endif
+
+# endif
+
+ and $63, %rcx
+ cmp $32, %rcx
+ jbe L(SourceStringAlignmentLess32)
+
+ and $-16, %rsi
+ and $15, %rcx
+ pxor %xmm0, %xmm0
+ pxor %xmm1, %xmm1
+
+ pcmpeqb (%rsi), %xmm1
+ pmovmskb %xmm1, %rdx
+ shr %cl, %rdx
+
+# ifdef USE_AS_STRNCPY
+# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
+ mov $16, %r10
+ sub %rcx, %r10
+ cmp %r10, %r8
+# else
+ mov $17, %r10
+ sub %rcx, %r10
+ cmp %r10, %r8
+# endif
+ jbe L(CopyFrom1To16BytesTailCase2OrCase3)
+# endif
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesTail)
+
+ pcmpeqb 16(%rsi), %xmm0
+ pmovmskb %xmm0, %rdx
+
+# ifdef USE_AS_STRNCPY
+ add $16, %r10
+ cmp %r10, %r8
+ jbe L(CopyFrom1To32BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+ jnz L(CopyFrom1To32Bytes)
+
+ movdqu (%rsi, %rcx), %xmm1 /* copy 16 bytes */
+ movdqu %xmm1, (%rdi)
+
+/* If source address alignment != destination address alignment */
+ .p2align 4
+L(Unalign16Both):
+ sub %rcx, %rdi
+# ifdef USE_AS_STRNCPY
+ add %rcx, %r8
+# endif
+ mov $16, %rcx
+ movdqa (%rsi, %rcx), %xmm1
+ movaps 16(%rsi, %rcx), %xmm2
+ movdqu %xmm1, (%rdi, %rcx)
+ pcmpeqb %xmm2, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $48, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm2)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movaps 16(%rsi, %rcx), %xmm3
+ movdqu %xmm2, (%rdi, %rcx)
+ pcmpeqb %xmm3, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm3)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movaps 16(%rsi, %rcx), %xmm4
+ movdqu %xmm3, (%rdi, %rcx)
+ pcmpeqb %xmm4, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm4)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movaps 16(%rsi, %rcx), %xmm1
+ movdqu %xmm4, (%rdi, %rcx)
+ pcmpeqb %xmm1, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm1)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movaps 16(%rsi, %rcx), %xmm2
+ movdqu %xmm1, (%rdi, %rcx)
+ pcmpeqb %xmm2, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm2)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movaps 16(%rsi, %rcx), %xmm3
+ movdqu %xmm2, (%rdi, %rcx)
+ pcmpeqb %xmm3, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $16, %rcx
+# ifdef USE_AS_STRNCPY
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+# endif
+ test %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm3)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ movdqu %xmm3, (%rdi, %rcx)
+ mov %rsi, %rdx
+ lea 16(%rsi, %rcx), %rsi
+ and $-0x40, %rsi
+ sub %rsi, %rdx
+ sub %rdx, %rdi
+# ifdef USE_AS_STRNCPY
+ lea 128(%r8, %rdx), %r8
+# endif
+L(Unaligned64Loop):
+ movaps (%rsi), %xmm2
+ movaps %xmm2, %xmm4
+ movaps 16(%rsi), %xmm5
+ movaps 32(%rsi), %xmm3
+ movaps %xmm3, %xmm6
+ movaps 48(%rsi), %xmm7
+ pminub %xmm5, %xmm2
+ pminub %xmm7, %xmm3
+ pminub %xmm2, %xmm3
+ pcmpeqb %xmm0, %xmm3
+ pmovmskb %xmm3, %rdx
+# ifdef USE_AS_STRNCPY
+ sub $64, %r8
+ jbe L(UnalignedLeaveCase2OrCase3)
+# endif
+ test %rdx, %rdx
+ jnz L(Unaligned64Leave)
+
+L(Unaligned64Loop_start):
+ add $64, %rdi
+ add $64, %rsi
+ movdqu %xmm4, -64(%rdi)
+ movaps (%rsi), %xmm2
+ movdqa %xmm2, %xmm4
+ movdqu %xmm5, -48(%rdi)
+ movaps 16(%rsi), %xmm5
+ pminub %xmm5, %xmm2
+ movaps 32(%rsi), %xmm3
+ movdqu %xmm6, -32(%rdi)
+ movaps %xmm3, %xmm6
+ movdqu %xmm7, -16(%rdi)
+ movaps 48(%rsi), %xmm7
+ pminub %xmm7, %xmm3
+ pminub %xmm2, %xmm3
+ pcmpeqb %xmm0, %xmm3
+ pmovmskb %xmm3, %rdx
+# ifdef USE_AS_STRNCPY
+ sub $64, %r8
+ jbe L(UnalignedLeaveCase2OrCase3)
+# endif
+ test %rdx, %rdx
+ jz L(Unaligned64Loop_start)
+
+L(Unaligned64Leave):
+ pxor %xmm1, %xmm1
+
+ pcmpeqb %xmm4, %xmm0
+ pcmpeqb %xmm5, %xmm1
+ pmovmskb %xmm0, %rdx
+ pmovmskb %xmm1, %rcx
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesUnaligned_0)
+ test %rcx, %rcx
+ jnz L(CopyFrom1To16BytesUnaligned_16)
+
+ pcmpeqb %xmm6, %xmm0
+ pcmpeqb %xmm7, %xmm1
+ pmovmskb %xmm0, %rdx
+ pmovmskb %xmm1, %rcx
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesUnaligned_32)
+
+ bsf %rcx, %rdx
+ movdqu %xmm4, (%rdi)
+ movdqu %xmm5, 16(%rdi)
+ movdqu %xmm6, 32(%rdi)
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+# ifdef USE_AS_STPCPY
+ lea 48(%rdi, %rdx), %rax
+# endif
+ movdqu %xmm7, 48(%rdi)
+ add $15, %r8
+ sub %rdx, %r8
+ lea 49(%rdi, %rdx), %rdi
+ jmp L(StrncpyFillTailWithZero)
+# else
+ add $48, %rsi
+ add $48, %rdi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+# endif
+
+/* If source address alignment == destination address alignment */
+
+L(SourceStringAlignmentLess32):
+ pxor %xmm0, %xmm0
+ movdqu (%rsi), %xmm1
+ movdqu 16(%rsi), %xmm2
+ pcmpeqb %xmm1, %xmm0
+ pmovmskb %xmm0, %rdx
+
+# ifdef USE_AS_STRNCPY
+# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
+ cmp $16, %r8
+# else
+ cmp $17, %r8
+# endif
+ jbe L(CopyFrom1To16BytesTail1Case2OrCase3)
+# endif
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesTail1)
+
+ pcmpeqb %xmm2, %xmm0
+ movdqu %xmm1, (%rdi)
+ pmovmskb %xmm0, %rdx
+
+# ifdef USE_AS_STRNCPY
+# if defined USE_AS_STPCPY || defined USE_AS_STRCAT
+ cmp $32, %r8
+# else
+ cmp $33, %r8
+# endif
+ jbe L(CopyFrom1To32Bytes1Case2OrCase3)
+# endif
+ test %rdx, %rdx
+ jnz L(CopyFrom1To32Bytes1)
+
+ and $-16, %rsi
+ and $15, %rcx
+ jmp L(Unalign16Both)
+
+/*------End of main part with loops---------------------*/
+
+/* Case1 */
+
+# if (!defined USE_AS_STRNCPY) || (defined USE_AS_STRCAT)
+ .p2align 4
+L(CopyFrom1To16Bytes):
+ add %rcx, %rdi
+ add %rcx, %rsi
+ bsf %rdx, %rdx
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+# endif
+ .p2align 4
+L(CopyFrom1To16BytesTail):
+ add %rcx, %rsi
+ bsf %rdx, %rdx
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+
+ .p2align 4
+L(CopyFrom1To32Bytes1):
+ add $16, %rsi
+ add $16, %rdi
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $16, %r8
+# endif
+L(CopyFrom1To16BytesTail1):
+ bsf %rdx, %rdx
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+
+ .p2align 4
+L(CopyFrom1To32Bytes):
+ bsf %rdx, %rdx
+ add %rcx, %rsi
+ add $16, %rdx
+ sub %rcx, %rdx
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+
+ .p2align 4
+L(CopyFrom1To16BytesUnaligned_0):
+ bsf %rdx, %rdx
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+# ifdef USE_AS_STPCPY
+ lea (%rdi, %rdx), %rax
+# endif
+ movdqu %xmm4, (%rdi)
+ add $63, %r8
+ sub %rdx, %r8
+ lea 1(%rdi, %rdx), %rdi
+ jmp L(StrncpyFillTailWithZero)
+# else
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+# endif
+
+ .p2align 4
+L(CopyFrom1To16BytesUnaligned_16):
+ bsf %rcx, %rdx
+ movdqu %xmm4, (%rdi)
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+# ifdef USE_AS_STPCPY
+ lea 16(%rdi, %rdx), %rax
+# endif
+ movdqu %xmm5, 16(%rdi)
+ add $47, %r8
+ sub %rdx, %r8
+ lea 17(%rdi, %rdx), %rdi
+ jmp L(StrncpyFillTailWithZero)
+# else
+ add $16, %rsi
+ add $16, %rdi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+# endif
+
+ .p2align 4
+L(CopyFrom1To16BytesUnaligned_32):
+ bsf %rdx, %rdx
+ movdqu %xmm4, (%rdi)
+ movdqu %xmm5, 16(%rdi)
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+# ifdef USE_AS_STPCPY
+ lea 32(%rdi, %rdx), %rax
+# endif
+ movdqu %xmm6, 32(%rdi)
+ add $31, %r8
+ sub %rdx, %r8
+ lea 33(%rdi, %rdx), %rdi
+ jmp L(StrncpyFillTailWithZero)
+# else
+ add $32, %rsi
+ add $32, %rdi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+# endif
+
+# ifdef USE_AS_STRNCPY
+# ifndef USE_AS_STRCAT
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm6):
+ movdqu %xmm6, (%rdi, %rcx)
+ jmp L(CopyFrom1To16BytesXmmExit)
+
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm5):
+ movdqu %xmm5, (%rdi, %rcx)
+ jmp L(CopyFrom1To16BytesXmmExit)
+
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm4):
+ movdqu %xmm4, (%rdi, %rcx)
+ jmp L(CopyFrom1To16BytesXmmExit)
+
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm3):
+ movdqu %xmm3, (%rdi, %rcx)
+ jmp L(CopyFrom1To16BytesXmmExit)
+
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm1):
+ movdqu %xmm1, (%rdi, %rcx)
+ jmp L(CopyFrom1To16BytesXmmExit)
+# endif
+
+ .p2align 4
+L(CopyFrom1To16BytesExit):
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitTable), %rdx, 4)
+
+/* Case2 */
+
+ .p2align 4
+L(CopyFrom1To16BytesCase2):
+ add $16, %r8
+ add %rcx, %rdi
+ add %rcx, %rsi
+ bsf %rdx, %rdx
+ cmp %r8, %rdx
+ jb L(CopyFrom1To16BytesExit)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+ .p2align 4
+L(CopyFrom1To32BytesCase2):
+ add %rcx, %rsi
+ bsf %rdx, %rdx
+ add $16, %rdx
+ sub %rcx, %rdx
+ cmp %r8, %rdx
+ jb L(CopyFrom1To16BytesExit)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+L(CopyFrom1To16BytesTailCase2):
+ add %rcx, %rsi
+ bsf %rdx, %rdx
+ cmp %r8, %rdx
+ jb L(CopyFrom1To16BytesExit)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+L(CopyFrom1To16BytesTail1Case2):
+ bsf %rdx, %rdx
+ cmp %r8, %rdx
+ jb L(CopyFrom1To16BytesExit)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+/* Case2 or Case3, Case3 */
+
+ .p2align 4
+L(CopyFrom1To16BytesCase2OrCase3):
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesCase2)
+L(CopyFrom1To16BytesCase3):
+ add $16, %r8
+ add %rcx, %rdi
+ add %rcx, %rsi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+ .p2align 4
+L(CopyFrom1To32BytesCase2OrCase3):
+ test %rdx, %rdx
+ jnz L(CopyFrom1To32BytesCase2)
+ add %rcx, %rsi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+ .p2align 4
+L(CopyFrom1To16BytesTailCase2OrCase3):
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesTailCase2)
+ add %rcx, %rsi
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+ .p2align 4
+L(CopyFrom1To32Bytes1Case2OrCase3):
+ add $16, %rdi
+ add $16, %rsi
+ sub $16, %r8
+L(CopyFrom1To16BytesTail1Case2OrCase3):
+ test %rdx, %rdx
+ jnz L(CopyFrom1To16BytesTail1Case2)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+# endif
+
+/*---------End of labels for copying 1-16 bytes and 1-32 bytes---------------*/
+
+ .p2align 4
+L(Exit1):
+ mov %dh, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea (%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $1, %r8
+ lea 1(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit2):
+ mov (%rsi), %dx
+ mov %dx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 1(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $2, %r8
+ lea 2(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit3):
+ mov (%rsi), %cx
+ mov %cx, (%rdi)
+ mov %dh, 2(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 2(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $3, %r8
+ lea 3(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit4):
+ mov (%rsi), %edx
+ mov %edx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 3(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $4, %r8
+ lea 4(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit5):
+ mov (%rsi), %ecx
+ mov %dh, 4(%rdi)
+ mov %ecx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 4(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $5, %r8
+ lea 5(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit6):
+ mov (%rsi), %ecx
+ mov 4(%rsi), %dx
+ mov %ecx, (%rdi)
+ mov %dx, 4(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 5(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $6, %r8
+ lea 6(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit7):
+ mov (%rsi), %ecx
+ mov 3(%rsi), %edx
+ mov %ecx, (%rdi)
+ mov %edx, 3(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 6(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $7, %r8
+ lea 7(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit8):
+ mov (%rsi), %rdx
+ mov %rdx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 7(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $8, %r8
+ lea 8(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit9):
+ mov (%rsi), %rcx
+ mov %dh, 8(%rdi)
+ mov %rcx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 8(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $9, %r8
+ lea 9(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit10):
+ mov (%rsi), %rcx
+ mov 8(%rsi), %dx
+ mov %rcx, (%rdi)
+ mov %dx, 8(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 9(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $10, %r8
+ lea 10(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit11):
+ mov (%rsi), %rcx
+ mov 7(%rsi), %edx
+ mov %rcx, (%rdi)
+ mov %edx, 7(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 10(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $11, %r8
+ lea 11(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit12):
+ mov (%rsi), %rcx
+ mov 8(%rsi), %edx
+ mov %rcx, (%rdi)
+ mov %edx, 8(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 11(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $12, %r8
+ lea 12(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit13):
+ mov (%rsi), %rcx
+ mov 5(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 5(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 12(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $13, %r8
+ lea 13(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit14):
+ mov (%rsi), %rcx
+ mov 6(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 6(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 13(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $14, %r8
+ lea 14(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit15):
+ mov (%rsi), %rcx
+ mov 7(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 7(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 14(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $15, %r8
+ lea 15(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit16):
+ movdqu (%rsi), %xmm0
+ movdqu %xmm0, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 15(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $16, %r8
+ lea 16(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit17):
+ movdqu (%rsi), %xmm0
+ movdqu %xmm0, (%rdi)
+ mov %dh, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 16(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $17, %r8
+ lea 17(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit18):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %cx
+ movdqu %xmm0, (%rdi)
+ mov %cx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 17(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $18, %r8
+ lea 18(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit19):
+ movdqu (%rsi), %xmm0
+ mov 15(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 18(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $19, %r8
+ lea 19(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit20):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 19(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $20, %r8
+ lea 20(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit21):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 16(%rdi)
+ mov %dh, 20(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 20(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $21, %r8
+ lea 21(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit22):
+ movdqu (%rsi), %xmm0
+ mov 14(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 14(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 21(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $22, %r8
+ lea 22(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit23):
+ movdqu (%rsi), %xmm0
+ mov 15(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 22(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $23, %r8
+ lea 23(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit24):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 23(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $24, %r8
+ lea 24(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit25):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 16(%rdi)
+ mov %dh, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 24(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $25, %r8
+ lea 25(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit26):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 24(%rsi), %cx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %cx, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 25(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $26, %r8
+ lea 26(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit27):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 23(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %ecx, 23(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 26(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $27, %r8
+ lea 27(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit28):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 24(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %ecx, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 27(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $28, %r8
+ lea 28(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit29):
+ movdqu (%rsi), %xmm0
+ movdqu 13(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 13(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 28(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $29, %r8
+ lea 29(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit30):
+ movdqu (%rsi), %xmm0
+ movdqu 14(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 14(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 29(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $30, %r8
+ lea 30(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit31):
+ movdqu (%rsi), %xmm0
+ movdqu 15(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 30(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $31, %r8
+ lea 31(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+ .p2align 4
+L(Exit32):
+ movdqu (%rsi), %xmm0
+ movdqu 16(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 31(%rdi), %rax
+# endif
+# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT
+ sub $32, %r8
+ lea 32(%rdi), %rdi
+ jnz L(StrncpyFillTailWithZero)
+# endif
+ ret
+
+# ifdef USE_AS_STRNCPY
+
+ .p2align 4
+L(StrncpyExit0):
+# ifdef USE_AS_STPCPY
+ mov %rdi, %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, (%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit1):
+ mov (%rsi), %dl
+ mov %dl, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 1(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 1(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit2):
+ mov (%rsi), %dx
+ mov %dx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 2(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 2(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit3):
+ mov (%rsi), %cx
+ mov 2(%rsi), %dl
+ mov %cx, (%rdi)
+ mov %dl, 2(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 3(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 3(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit4):
+ mov (%rsi), %edx
+ mov %edx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 4(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 4(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit5):
+ mov (%rsi), %ecx
+ mov 4(%rsi), %dl
+ mov %ecx, (%rdi)
+ mov %dl, 4(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 5(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 5(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit6):
+ mov (%rsi), %ecx
+ mov 4(%rsi), %dx
+ mov %ecx, (%rdi)
+ mov %dx, 4(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 6(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 6(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit7):
+ mov (%rsi), %ecx
+ mov 3(%rsi), %edx
+ mov %ecx, (%rdi)
+ mov %edx, 3(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 7(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 7(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit8):
+ mov (%rsi), %rdx
+ mov %rdx, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 8(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 8(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit9):
+ mov (%rsi), %rcx
+ mov 8(%rsi), %dl
+ mov %rcx, (%rdi)
+ mov %dl, 8(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 9(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 9(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit10):
+ mov (%rsi), %rcx
+ mov 8(%rsi), %dx
+ mov %rcx, (%rdi)
+ mov %dx, 8(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 10(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 10(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit11):
+ mov (%rsi), %rcx
+ mov 7(%rsi), %edx
+ mov %rcx, (%rdi)
+ mov %edx, 7(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 11(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 11(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit12):
+ mov (%rsi), %rcx
+ mov 8(%rsi), %edx
+ mov %rcx, (%rdi)
+ mov %edx, 8(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 12(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 12(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit13):
+ mov (%rsi), %rcx
+ mov 5(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 5(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 13(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 13(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit14):
+ mov (%rsi), %rcx
+ mov 6(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 6(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 14(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 14(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit15):
+ mov (%rsi), %rcx
+ mov 7(%rsi), %rdx
+ mov %rcx, (%rdi)
+ mov %rdx, 7(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 15(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 15(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit16):
+ movdqu (%rsi), %xmm0
+ movdqu %xmm0, (%rdi)
+# ifdef USE_AS_STPCPY
+ lea 16(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 16(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit17):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %cl
+ movdqu %xmm0, (%rdi)
+ mov %cl, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 17(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 17(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit18):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %cx
+ movdqu %xmm0, (%rdi)
+ mov %cx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 18(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 18(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit19):
+ movdqu (%rsi), %xmm0
+ mov 15(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 19(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 19(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit20):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 20(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 20(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit21):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %ecx
+ mov 20(%rsi), %dl
+ movdqu %xmm0, (%rdi)
+ mov %ecx, 16(%rdi)
+ mov %dl, 20(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 21(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 21(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit22):
+ movdqu (%rsi), %xmm0
+ mov 14(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 14(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 22(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 22(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit23):
+ movdqu (%rsi), %xmm0
+ mov 15(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 23(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 23(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit24):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rcx
+ movdqu %xmm0, (%rdi)
+ mov %rcx, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 24(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 24(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit25):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 24(%rsi), %cl
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %cl, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 25(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 25(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit26):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 24(%rsi), %cx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %cx, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 26(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 26(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit27):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 23(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %ecx, 23(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 27(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 27(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit28):
+ movdqu (%rsi), %xmm0
+ mov 16(%rsi), %rdx
+ mov 24(%rsi), %ecx
+ movdqu %xmm0, (%rdi)
+ mov %rdx, 16(%rdi)
+ mov %ecx, 24(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 28(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 28(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit29):
+ movdqu (%rsi), %xmm0
+ movdqu 13(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 13(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 29(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 29(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit30):
+ movdqu (%rsi), %xmm0
+ movdqu 14(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 14(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 30(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 30(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit31):
+ movdqu (%rsi), %xmm0
+ movdqu 15(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 15(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 31(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 31(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit32):
+ movdqu (%rsi), %xmm0
+ movdqu 16(%rsi), %xmm2
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 16(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 32(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 32(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(StrncpyExit33):
+ movdqu (%rsi), %xmm0
+ movdqu 16(%rsi), %xmm2
+ mov 32(%rsi), %cl
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm2, 16(%rdi)
+ mov %cl, 32(%rdi)
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 33(%rdi)
+# endif
+ ret
+
+# ifndef USE_AS_STRCAT
+
+ .p2align 4
+L(Fill0):
+ ret
+
+ .p2align 4
+L(Fill1):
+ mov %dl, (%rdi)
+ ret
+
+ .p2align 4
+L(Fill2):
+ mov %dx, (%rdi)
+ ret
+
+ .p2align 4
+L(Fill3):
+ mov %edx, -1(%rdi)
+ ret
+
+ .p2align 4
+L(Fill4):
+ mov %edx, (%rdi)
+ ret
+
+ .p2align 4
+L(Fill5):
+ mov %edx, (%rdi)
+ mov %dl, 4(%rdi)
+ ret
+
+ .p2align 4
+L(Fill6):
+ mov %edx, (%rdi)
+ mov %dx, 4(%rdi)
+ ret
+
+ .p2align 4
+L(Fill7):
+ mov %rdx, -1(%rdi)
+ ret
+
+ .p2align 4
+L(Fill8):
+ mov %rdx, (%rdi)
+ ret
+
+ .p2align 4
+L(Fill9):
+ mov %rdx, (%rdi)
+ mov %dl, 8(%rdi)
+ ret
+
+ .p2align 4
+L(Fill10):
+ mov %rdx, (%rdi)
+ mov %dx, 8(%rdi)
+ ret
+
+ .p2align 4
+L(Fill11):
+ mov %rdx, (%rdi)
+ mov %edx, 7(%rdi)
+ ret
+
+ .p2align 4
+L(Fill12):
+ mov %rdx, (%rdi)
+ mov %edx, 8(%rdi)
+ ret
+
+ .p2align 4
+L(Fill13):
+ mov %rdx, (%rdi)
+ mov %rdx, 5(%rdi)
+ ret
+
+ .p2align 4
+L(Fill14):
+ mov %rdx, (%rdi)
+ mov %rdx, 6(%rdi)
+ ret
+
+ .p2align 4
+L(Fill15):
+ movdqu %xmm0, -1(%rdi)
+ ret
+
+ .p2align 4
+L(Fill16):
+ movdqu %xmm0, (%rdi)
+ ret
+
+ .p2align 4
+L(CopyFrom1To16BytesUnalignedXmm2):
+ movdqu %xmm2, (%rdi, %rcx)
+
+ .p2align 4
+L(CopyFrom1To16BytesXmmExit):
+ bsf %rdx, %rdx
+ add $15, %r8
+ add %rcx, %rdi
+# ifdef USE_AS_STPCPY
+ lea (%rdi, %rdx), %rax
+# endif
+ sub %rdx, %r8
+ lea 1(%rdi, %rdx), %rdi
+
+ .p2align 4
+L(StrncpyFillTailWithZero):
+ pxor %xmm0, %xmm0
+ xor %rdx, %rdx
+ sub $16, %r8
+ jbe L(StrncpyFillExit)
+
+ movdqu %xmm0, (%rdi)
+ add $16, %rdi
+
+ mov %rdi, %rsi
+ and $0xf, %rsi
+ sub %rsi, %rdi
+ add %rsi, %r8
+ sub $64, %r8
+ jb L(StrncpyFillLess64)
+
+L(StrncpyFillLoopMovdqa):
+ movdqa %xmm0, (%rdi)
+ movdqa %xmm0, 16(%rdi)
+ movdqa %xmm0, 32(%rdi)
+ movdqa %xmm0, 48(%rdi)
+ add $64, %rdi
+ sub $64, %r8
+ jae L(StrncpyFillLoopMovdqa)
+
+L(StrncpyFillLess64):
+ add $32, %r8
+ jl L(StrncpyFillLess32)
+ movdqa %xmm0, (%rdi)
+ movdqa %xmm0, 16(%rdi)
+ add $32, %rdi
+ sub $16, %r8
+ jl L(StrncpyFillExit)
+ movdqa %xmm0, (%rdi)
+ add $16, %rdi
+ BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
+
+L(StrncpyFillLess32):
+ add $16, %r8
+ jl L(StrncpyFillExit)
+ movdqa %xmm0, (%rdi)
+ add $16, %rdi
+ BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
+
+L(StrncpyFillExit):
+ add $16, %r8
+ BRANCH_TO_JMPTBL_ENTRY (L(FillTable), %r8, 4)
+
+/* end of ifndef USE_AS_STRCAT */
+# endif
+
+ .p2align 4
+L(UnalignedLeaveCase2OrCase3):
+ test %rdx, %rdx
+ jnz L(Unaligned64LeaveCase2)
+L(Unaligned64LeaveCase3):
+ lea 64(%r8), %rcx
+ and $-16, %rcx
+ add $48, %r8
+ jl L(CopyFrom1To16BytesCase3)
+ movdqu %xmm4, (%rdi)
+ sub $16, %r8
+ jb L(CopyFrom1To16BytesCase3)
+ movdqu %xmm5, 16(%rdi)
+ sub $16, %r8
+ jb L(CopyFrom1To16BytesCase3)
+ movdqu %xmm6, 32(%rdi)
+ sub $16, %r8
+ jb L(CopyFrom1To16BytesCase3)
+ movdqu %xmm7, 48(%rdi)
+# ifdef USE_AS_STPCPY
+ lea 64(%rdi), %rax
+# endif
+# ifdef USE_AS_STRCAT
+ xor %ch, %ch
+ movb %ch, 64(%rdi)
+# endif
+ ret
+
+ .p2align 4
+L(Unaligned64LeaveCase2):
+ xor %rcx, %rcx
+ pcmpeqb %xmm4, %xmm0
+ pmovmskb %xmm0, %rdx
+ add $48, %r8
+ jle L(CopyFrom1To16BytesCase2OrCase3)
+ test %rdx, %rdx
+# ifndef USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm4)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+ pcmpeqb %xmm5, %xmm0
+ pmovmskb %xmm0, %rdx
+ movdqu %xmm4, (%rdi)
+ add $16, %rcx
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+ test %rdx, %rdx
+# ifndef USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm5)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ pcmpeqb %xmm6, %xmm0
+ pmovmskb %xmm0, %rdx
+ movdqu %xmm5, 16(%rdi)
+ add $16, %rcx
+ sub $16, %r8
+ jbe L(CopyFrom1To16BytesCase2OrCase3)
+ test %rdx, %rdx
+# ifndef USE_AS_STRCAT
+ jnz L(CopyFrom1To16BytesUnalignedXmm6)
+# else
+ jnz L(CopyFrom1To16Bytes)
+# endif
+
+ pcmpeqb %xmm7, %xmm0
+ pmovmskb %xmm0, %rdx
+ movdqu %xmm6, 32(%rdi)
+ lea 16(%rdi, %rcx), %rdi
+ lea 16(%rsi, %rcx), %rsi
+ bsf %rdx, %rdx
+ cmp %r8, %rdx
+ jb L(CopyFrom1To16BytesExit)
+ BRANCH_TO_JMPTBL_ENTRY (L(ExitStrncpyTable), %r8, 4)
+
+ .p2align 4
+L(ExitZero):
+# ifndef USE_AS_STRCAT
+ mov %rdi, %rax
+# endif
+ ret
+
+# endif
+
+# ifndef USE_AS_STRCAT
+END (STRCPY)
+# else
+END (STRCAT)
+# endif
+ .p2align 4
+ .section .rodata
+L(ExitTable):
+ .int JMPTBL(L(Exit1), L(ExitTable))
+ .int JMPTBL(L(Exit2), L(ExitTable))
+ .int JMPTBL(L(Exit3), L(ExitTable))
+ .int JMPTBL(L(Exit4), L(ExitTable))
+ .int JMPTBL(L(Exit5), L(ExitTable))
+ .int JMPTBL(L(Exit6), L(ExitTable))
+ .int JMPTBL(L(Exit7), L(ExitTable))
+ .int JMPTBL(L(Exit8), L(ExitTable))
+ .int JMPTBL(L(Exit9), L(ExitTable))
+ .int JMPTBL(L(Exit10), L(ExitTable))
+ .int JMPTBL(L(Exit11), L(ExitTable))
+ .int JMPTBL(L(Exit12), L(ExitTable))
+ .int JMPTBL(L(Exit13), L(ExitTable))
+ .int JMPTBL(L(Exit14), L(ExitTable))
+ .int JMPTBL(L(Exit15), L(ExitTable))
+ .int JMPTBL(L(Exit16), L(ExitTable))
+ .int JMPTBL(L(Exit17), L(ExitTable))
+ .int JMPTBL(L(Exit18), L(ExitTable))
+ .int JMPTBL(L(Exit19), L(ExitTable))
+ .int JMPTBL(L(Exit20), L(ExitTable))
+ .int JMPTBL(L(Exit21), L(ExitTable))
+ .int JMPTBL(L(Exit22), L(ExitTable))
+ .int JMPTBL(L(Exit23), L(ExitTable))
+ .int JMPTBL(L(Exit24), L(ExitTable))
+ .int JMPTBL(L(Exit25), L(ExitTable))
+ .int JMPTBL(L(Exit26), L(ExitTable))
+ .int JMPTBL(L(Exit27), L(ExitTable))
+ .int JMPTBL(L(Exit28), L(ExitTable))
+ .int JMPTBL(L(Exit29), L(ExitTable))
+ .int JMPTBL(L(Exit30), L(ExitTable))
+ .int JMPTBL(L(Exit31), L(ExitTable))
+ .int JMPTBL(L(Exit32), L(ExitTable))
+# ifdef USE_AS_STRNCPY
+L(ExitStrncpyTable):
+ .int JMPTBL(L(StrncpyExit0), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit1), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit2), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit3), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit4), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit5), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit6), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit7), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit8), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit9), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit10), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit11), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit12), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit13), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit14), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit15), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit16), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit17), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit18), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit19), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit20), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit21), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit22), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit23), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit24), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit25), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit26), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit27), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit28), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit29), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit30), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit31), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit32), L(ExitStrncpyTable))
+ .int JMPTBL(L(StrncpyExit33), L(ExitStrncpyTable))
+# ifndef USE_AS_STRCAT
+ .p2align 4
+L(FillTable):
+ .int JMPTBL(L(Fill0), L(FillTable))
+ .int JMPTBL(L(Fill1), L(FillTable))
+ .int JMPTBL(L(Fill2), L(FillTable))
+ .int JMPTBL(L(Fill3), L(FillTable))
+ .int JMPTBL(L(Fill4), L(FillTable))
+ .int JMPTBL(L(Fill5), L(FillTable))
+ .int JMPTBL(L(Fill6), L(FillTable))
+ .int JMPTBL(L(Fill7), L(FillTable))
+ .int JMPTBL(L(Fill8), L(FillTable))
+ .int JMPTBL(L(Fill9), L(FillTable))
+ .int JMPTBL(L(Fill10), L(FillTable))
+ .int JMPTBL(L(Fill11), L(FillTable))
+ .int JMPTBL(L(Fill12), L(FillTable))
+ .int JMPTBL(L(Fill13), L(FillTable))
+ .int JMPTBL(L(Fill14), L(FillTable))
+ .int JMPTBL(L(Fill15), L(FillTable))
+ .int JMPTBL(L(Fill16), L(FillTable))
+# endif
+# endif
+#endif
diff --git a/sysdeps/x86_64/multiarch/strncpy.S b/sysdeps/x86_64/multiarch/strncpy.S
index 6d87a0b..afbd870 100644
--- a/sysdeps/x86_64/multiarch/strncpy.S
+++ b/sysdeps/x86_64/multiarch/strncpy.S
@@ -1,5 +1,85 @@
-/* Multiple versions of strncpy
- All versions must be listed in ifunc-impl-list.c. */
-#define STRCPY strncpy
+/* Multiple versions of strncpy
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2009-2015 Free Software Foundation, Inc.
+ Contributed by Intel Corporation.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
#define USE_AS_STRNCPY
-#include "strcpy.S"
+#ifndef STRNCPY
+#define STRNCPY strncpy
+#endif
+
+#ifdef USE_AS_STPCPY
+# define STRNCPY_SSSE3 __stpncpy_ssse3
+# define STRNCPY_SSE2 __stpncpy_sse2
+# define STRNCPY_SSE2_UNALIGNED __stpncpy_sse2_unaligned
+# define __GI_STRNCPY __GI_stpncpy
+# define __GI___STRNCPY __GI___stpncpy
+#else
+# define STRNCPY_SSSE3 __strncpy_ssse3
+# define STRNCPY_SSE2 __strncpy_sse2
+# define STRNCPY_SSE2_UNALIGNED __strncpy_sse2_unaligned
+# define __GI_STRNCPY __GI_strncpy
+#endif
+
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+ .text
+ENTRY(STRNCPY)
+ .type STRNCPY, @gnu_indirect_function
+ cmpl $0, __cpu_features+KIND_OFFSET(%rip)
+ jne 1f
+ call __init_cpu_features
+1: leaq STRNCPY_SSE2_UNALIGNED(%rip), %rax
+ testl $bit_Fast_Unaligned_Load, __cpu_features+FEATURE_OFFSET+index_Fast_Unaligned_Load(%rip)
+ jnz 2f
+ leaq STRNCPY_SSE2(%rip), %rax
+ testl $bit_SSSE3, __cpu_features+CPUID_OFFSET+index_SSSE3(%rip)
+ jz 2f
+ leaq STRNCPY_SSSE3(%rip), %rax
+2: ret
+END(STRNCPY)
+
+# undef ENTRY
+# define ENTRY(name) \
+ .type STRNCPY_SSE2, @function; \
+ .align 16; \
+ .globl STRNCPY_SSE2; \
+ .hidden STRNCPY_SSE2; \
+ STRNCPY_SSE2: cfi_startproc; \
+ CALL_MCOUNT
+# undef END
+# define END(name) \
+ cfi_endproc; .size STRNCPY_SSE2, .-STRNCPY_SSE2
+# undef libc_hidden_builtin_def
+/* It doesn't make sense to send libc-internal strncpy calls through a PLT.
+   The speedup we get from using SSSE3 instructions is likely eaten away
+   by the indirect call in the PLT.  */
+# define libc_hidden_builtin_def(name) \
+ .globl __GI_STRNCPY; __GI_STRNCPY = STRNCPY_SSE2
+# undef libc_hidden_def
+# define libc_hidden_def(name) \
+ .globl __GI___STRNCPY; __GI___STRNCPY = STRNCPY_SSE2
+#endif
+
+#ifndef USE_AS_STRNCPY
+#include "../strcpy.S"
+#endif
-----------------------------------------------------------------------
hooks/post-receive
--
GNU C Library master sources