This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
PATCH: Add 32bit SSE2 strlen
- From: "H.J. Lu" <hongjiu dot lu at intel dot com>
- To: GNU C Library <libc-alpha at sourceware dot org>
- Date: Tue, 4 Aug 2009 13:48:10 -0700
- Subject: PATCH: Add 32bit SSE2 strlen
- Reply-to: "H.J. Lu" <hjl dot tools at gmail dot com>
Hi,
This patch adds a 32-bit SSE2 strlen. I added a slow_vector flag to avoid SSE
vector instructions on Atom, instead of disabling feature bits, so that
SSE can still be used in selected functions.
I also updated comments on hidden IFUNC functions in ia32 libc.so.
Tested on 32bit and 64bit Core 2 and Atom.
Thanks.
H.J.
---
2009-08-04 H.J. Lu <hongjiu.lu@intel.com>
* sysdeps/i386/i686/multiarch/ifunc-defines.sym (SLOW_VECTOR_OFFSET):
New.
* sysdeps/x86_64/multiarch/ifunc-defines.sym (SLOW_VECTOR_OFFSET):
Likewise.
* sysdeps/i386/i686/multiarch/init-arch.c (ENABLE_SSSE3_ON_ATOM):
Removed.
* sysdeps/i386/i686/multiarch/strcspn.S: Add comments for no
hidden IFUNC functions.
* sysdeps/i386/i686/multiarch/strspn.S: Likewise.
* sysdeps/i386/i686/multiarch/strlen.S: New.
* sysdeps/x86_64/multiarch/init-arch.c (__init_cpu_features): Set
slow_vector instead of clearing SSSE3 bit for Atom.
* sysdeps/x86_64/multiarch/init-arch.h (cpu_features): Add
slow_vector.
* sysdeps/x86_64/multiarch/strcpy.S (STRCPY): Check
SLOW_VECTOR_OFFSET.
diff --git a/sysdeps/i386/i686/multiarch/ifunc-defines.sym b/sysdeps/i386/i686/multiarch/ifunc-defines.sym
index e2021cd..a2a02d3 100644
--- a/sysdeps/i386/i686/multiarch/ifunc-defines.sym
+++ b/sysdeps/i386/i686/multiarch/ifunc-defines.sym
@@ -13,5 +13,6 @@ CPUID_ECX_OFFSET offsetof (struct cpuid_registers, ecx)
CPUID_EDX_OFFSET offsetof (struct cpuid_registers, edx)
FAMILY_OFFSET offsetof (struct cpu_features, family)
MODEL_OFFSET offsetof (struct cpu_features, model)
+SLOW_VECTOR_OFFSET offsetof (struct cpu_features, slow_vector)
COMMON_CPUID_INDEX_1
diff --git a/sysdeps/i386/i686/multiarch/init-arch.c b/sysdeps/i386/i686/multiarch/init-arch.c
index b371bae..00a94d8 100644
--- a/sysdeps/i386/i686/multiarch/init-arch.c
+++ b/sysdeps/i386/i686/multiarch/init-arch.c
@@ -1,3 +1 @@
-#define ENABLE_SSSE3_ON_ATOM
-
#include <sysdeps/x86_64/multiarch/init-arch.c>
diff --git a/sysdeps/i386/i686/multiarch/strcspn.S b/sysdeps/i386/i686/multiarch/strcspn.S
index f5ca092..473f447 100644
--- a/sysdeps/i386/i686/multiarch/strcspn.S
+++ b/sysdeps/i386/i686/multiarch/strcspn.S
@@ -83,9 +83,9 @@ END(STRCSPN)
# define END(name) \
cfi_endproc; .size STRCSPN_IA32, .-STRCSPN_IA32
# undef libc_hidden_builtin_def
-/* It doesn't make sense to send libc-internal strcspn calls through a PLT.
- The speedup we get from using SSE4.2 instruction is likely eaten away
- by the indirect call in the PLT. */
+/* IFUNC doesn't work with the hidden functions in a shared library since
+ they will be called without setting up EBX, which is needed for the PLT
+ used by IFUNC. */
# define libc_hidden_builtin_def(name) \
.globl __GI_STRCSPN; __GI_STRCSPN = STRCSPN_IA32
#endif
diff --git a/sysdeps/i386/i686/multiarch/strlen.S b/sysdeps/i386/i686/multiarch/strlen.S
new file mode 100644
index 0000000..bdb1de3
--- /dev/null
+++ b/sysdeps/i386/i686/multiarch/strlen.S
@@ -0,0 +1,158 @@
+/* Multiple versions of strlen
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Intel Corporation.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <ifunc-defines.h>
+
+/* Define multiple versions only for the definition in libc and for the
+ DSO. In static binaries, we need strlen before the initialization
+ happened. */
+#if defined SHARED && !defined NOT_IN_libc
+ .section .gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
+ .globl __i686.get_pc_thunk.bx
+ .hidden __i686.get_pc_thunk.bx
+ .p2align 4
+ .type __i686.get_pc_thunk.bx,@function
+__i686.get_pc_thunk.bx:
+ movl (%esp), %ebx
+ ret
+
+ .text
+ENTRY(strlen)
+ .type strlen, @gnu_indirect_function
+ pushl %ebx
+ cfi_adjust_cfa_offset (4)
+ cfi_rel_offset (ebx, 0)
+ call __i686.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ cmpl $0, KIND_OFFSET+__cpu_features@GOTOFF(%ebx)
+ jne 1f
+ call __init_cpu_features
+1: leal __strlen_ia32@GOTOFF(%ebx), %eax
+ testl $(1<<26), CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_EDX_OFFSET+__cpu_features@GOTOFF(%ebx)
+ jz 2f
+ cmpl $1, SLOW_VECTOR_OFFSET+__cpu_features@GOTOFF(%ebx)
+ je 2f
+ leal __strlen_sse2@GOTOFF(%ebx), %eax
+2: popl %ebx
+ cfi_adjust_cfa_offset (-4);
+ cfi_restore (ebx)
+ ret
+END(strlen)
+
+#define CFI_PUSH(REG) \
+ cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (REG, 0)
+
+#define CFI_POP(REG) \
+ cfi_adjust_cfa_offset (-4); \
+ cfi_restore (REG)
+
+#define PARMS LINKAGE+4 /* Preserve ESI. */
+#define STR PARMS
+#define ENTRANCE pushl %esi; CFI_PUSH (esi); ENTER
+#define RETURN popl %esi; CFI_POP (esi); LEAVE; ret
+
+ .text
+ENTRY (__strlen_sse2)
+/*
+ * This implementation uses SSE instructions to compare up to 16 bytes
+ * at a time looking for the end of string (null char).
+ */
+ ENTRANCE
+ mov STR(%esp), %eax
+ mov %eax, %ecx
+ pxor %xmm0, %xmm0 /* 16 null chars */
+ mov %eax, %esi
+ and $15, %ecx
+ jz 1f /* string is 16 byte aligned */
+
+ /*
+ * Unaligned case. Round down to 16-byte boundary before comparing
+ * 16 bytes for a null char. The code then compensates for any extra chars
+ * preceding the start of the string.
+ */
+ and $-16, %esi
+
+ pcmpeqb (%esi), %xmm0
+ lea 16(%eax), %esi
+ pmovmskb %xmm0, %edx
+
+ shr %cl, %edx /* Compensate for bytes preceding the string */
+ test %edx, %edx
+ jnz 2f
+ sub %ecx, %esi /* no null, adjust to next 16-byte boundary */
+ pxor %xmm0, %xmm0 /* clear xmm0, may have been changed... */
+
+ .p2align 4
+1: /* 16 byte aligned */
+ pcmpeqb (%esi), %xmm0 /* look for null bytes */
+ pmovmskb %xmm0, %edx /* move each byte mask of %xmm0 to edx */
+
+ add $16, %esi /* prepare to search next 16 bytes */
+ test %edx, %edx /* if no null byte, %edx must be 0 */
+ jnz 2f /* found a null */
+
+ pcmpeqb (%esi), %xmm0
+ pmovmskb %xmm0, %edx
+ add $16, %esi
+ test %edx, %edx
+ jnz 2f
+
+ pcmpeqb (%esi), %xmm0
+ pmovmskb %xmm0, %edx
+ add $16, %esi
+ test %edx, %edx
+ jnz 2f
+
+ pcmpeqb (%esi), %xmm0
+ pmovmskb %xmm0, %edx
+ add $16, %esi
+ test %edx, %edx
+ jz 1b
+
+2:
+ neg %eax
+ lea -16(%eax, %esi), %eax /* calculate exact offset */
+ bsf %edx, %ecx /* Least significant 1 bit is index of null */
+ add %ecx, %eax
+ RETURN
+
+END (__strlen_sse2)
+
+# undef ENTRY
+# define ENTRY(name) \
+ .type __strlen_ia32, @function; \
+ .globl __strlen_ia32; \
+ .p2align 4
+ __strlen_ia32: cfi_startproc; \
+ CALL_MCOUNT
+# undef END
+# define END(name) \
+ cfi_endproc; .size __strlen_ia32, .-__strlen_ia32
+# undef libc_hidden_builtin_def
+/* IFUNC doesn't work with the hidden functions in a shared library since
+ they will be called without setting up EBX, which is needed for the PLT
+ used by IFUNC. */
+# define libc_hidden_builtin_def(name) \
+ .globl __GI_strlen; __GI_strlen = __strlen_ia32
+#endif
+
+#include "../../i586/strlen.S"
diff --git a/sysdeps/i386/i686/multiarch/strspn.S b/sysdeps/i386/i686/multiarch/strspn.S
index 53db131..d09fc1e 100644
--- a/sysdeps/i386/i686/multiarch/strspn.S
+++ b/sysdeps/i386/i686/multiarch/strspn.S
@@ -68,9 +68,9 @@ END(strspn)
# define END(name) \
cfi_endproc; .size __strspn_ia32, .-__strspn_ia32
# undef libc_hidden_builtin_def
-/* It doesn't make sense to send libc-internal strspn calls through a PLT.
- The speedup we get from using SSE4.2 instruction is likely eaten away
- by the indirect call in the PLT. */
+/* IFUNC doesn't work with the hidden functions in a shared library since
+ they will be called without setting up EBX, which is needed for the PLT
+ used by IFUNC. */
# define libc_hidden_builtin_def(name) \
.globl __GI_strspn; __GI_strspn = __strspn_ia32
#endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-defines.sym b/sysdeps/x86_64/multiarch/ifunc-defines.sym
index e2021cd..a2a02d3 100644
--- a/sysdeps/x86_64/multiarch/ifunc-defines.sym
+++ b/sysdeps/x86_64/multiarch/ifunc-defines.sym
@@ -13,5 +13,6 @@ CPUID_ECX_OFFSET offsetof (struct cpuid_registers, ecx)
CPUID_EDX_OFFSET offsetof (struct cpuid_registers, edx)
FAMILY_OFFSET offsetof (struct cpu_features, family)
MODEL_OFFSET offsetof (struct cpu_features, model)
+SLOW_VECTOR_OFFSET offsetof (struct cpu_features, slow_vector)
COMMON_CPUID_INDEX_1
diff --git a/sysdeps/x86_64/multiarch/init-arch.c b/sysdeps/x86_64/multiarch/init-arch.c
index c152ab2..e8581cf 100644
--- a/sysdeps/x86_64/multiarch/init-arch.c
+++ b/sysdeps/x86_64/multiarch/init-arch.c
@@ -67,11 +67,9 @@ __init_cpu_features (void)
{
__cpu_features.model += extended_model;
-#ifndef ENABLE_SSSE3_ON_ATOM
if (__cpu_features.model == 0x1c)
- /* Avoid SSSE3 on Atom since it is slow. */
- __cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx &= ~(1 << 9);
-#endif
+ /* Vector instructions are slow on Atom. */
+ __cpu_features.slow_vector = 1;
}
}
/* This spells out "AuthenticAMD". */
diff --git a/sysdeps/x86_64/multiarch/init-arch.h b/sysdeps/x86_64/multiarch/init-arch.h
index 8d9b1e8..883214f 100644
--- a/sysdeps/x86_64/multiarch/init-arch.h
+++ b/sysdeps/x86_64/multiarch/init-arch.h
@@ -44,6 +44,7 @@ extern struct cpu_features
} cpuid[COMMON_CPUID_INDEX_MAX];
unsigned int family;
unsigned int model;
+ unsigned int slow_vector;
} __cpu_features attribute_hidden;
diff --git a/sysdeps/x86_64/multiarch/strcpy.S b/sysdeps/x86_64/multiarch/strcpy.S
index 7e400a9..dfded65 100644
--- a/sysdeps/x86_64/multiarch/strcpy.S
+++ b/sysdeps/x86_64/multiarch/strcpy.S
@@ -65,6 +65,8 @@ ENTRY(STRCPY)
1: leaq STRCPY_SSE2(%rip), %rax
testl $(1<<9), __cpu_features+CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET(%rip)
jz 2f
+ cmpl $1, __cpu_features+SLOW_VECTOR_OFFSET(%rip)
+ je 2f
leaq STRCPY_SSSE3(%rip), %rax
2: ret
END(STRCPY)