[PATCH V4] powerpc: Optimized strcmp for power10
Amrita H S
amritahs@linux.vnet.ibm.com
Thu Nov 30 16:11:59 GMT 2023
This patch is based on __strcmp_power9 and __strlen_power10.
Improvements from __strcmp_power9:
1. Uses new POWER10 instructions
- This code uses lxvp to decrease contention on load
by loading 32 bytes per instruction.
2. Performance implication
- This version has around 30% better performance on average.
- Performance regressions are seen for a few specific combinations
of sizes and alignments. Some of them are observed even without
this change, while the rest may be induced by the patch.
Signed-off-by: Amrita H S <amritahs@linux.vnet.ibm.com>
---
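For reference, here is a rough scalar model of the 16-byte compare
step that the patch vectorizes with vcmpnezb (block_cmp16 is an
illustrative name, not part of the patch):

#include <stddef.h>

/* Model of one 16B step: flag the first position where the inputs
   differ or either byte is NUL (what vcmpnezb computes) and return
   the byte difference there.  *done tells the caller whether the
   comparison is finished.  */
static int
block_cmp16 (const unsigned char *s1, const unsigned char *s2, int *done)
{
  for (size_t i = 0; i < 16; i++)
    if (s1[i] != s2[i] || s1[i] == '\0')
      {
        *done = 1;
        return (int) s1[i] - (int) s2[i];
      }
  *done = 0;  /* No mismatch/NUL: advance both inputs by 16B and retry.  */
  return 0;
}

The assembly gets the same effect with a single vector compare and a
CR6 test per 16B block instead of per-byte branches; vctzlsbb and
vextubrx recover the index and bytes only on the mismatch path.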
sysdeps/powerpc/powerpc64/le/power10/strcmp.S | 190 ++++++++++++++++++
sysdeps/powerpc/powerpc64/multiarch/Makefile | 3 +-
.../powerpc64/multiarch/ifunc-impl-list.c | 4 +
.../powerpc64/multiarch/strcmp-power10.S | 26 +++
sysdeps/powerpc/powerpc64/multiarch/strcmp.c | 4 +
5 files changed, 226 insertions(+), 1 deletion(-)
create mode 100644 sysdeps/powerpc/powerpc64/le/power10/strcmp.S
create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
diff --git a/sysdeps/powerpc/powerpc64/le/power10/strcmp.S b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
new file mode 100644
index 0000000000..056efae948
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
@@ -0,0 +1,190 @@
+/* Optimized strcmp implementation for PowerPC64/POWER10.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+#include <sysdep.h>
+
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
+/* Implements the function
+ int [r3] strcmp (const char *s1 [r3], const char *s2 [r4]). */
+
+/* TODO: Change this to actual instructions when minimum binutils is upgraded
+ to 2.27. Macros are defined below for these newer instructions in order
+ to maintain compatibility. */
+
+#define LXVP(xtp,dq,ra) \
+ .long(((6)<<(32-6)) \
+ | ((((xtp)-32)>>1)<<(32-10)) \
+ | ((1)<<(32-11)) \
+ | ((ra)<<(32-16)) \
+ | dq)
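+
+/* LXVP(xtp,dq,ra) emits the raw instruction word for "lxvp xtp,dq(ra)",
+   which loads 32 bytes into the VSX register pair xtp:xtp+1.  xtp must
+   be an even register number and dq a multiple of 16.  */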
+
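+/* Compare 16B at offset(r3) and offset(r4).  vcmpnezb. flags bytes that
+   differ or are NUL in either input and updates CR6; bne cr6 is taken
+   when any such byte exists.  */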
+#define COMPARE_16(vreg1,vreg2,offset) \
+ lxv vreg1+32,offset(r3); \
+ lxv vreg2+32,offset(r4); \
+ vcmpnezb. v7,vreg1,vreg2; \
+ bne cr6,L(different); \
+
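+/* Compare 32B loaded with lxvp.  In little-endian mode the odd register
+   of the pair holds the 16 bytes at the lower address, so vreg1+1 and
+   vreg2+1 are tested first to catch the earliest mismatch.  */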
+#define COMPARE_32(vreg1,vreg2,offset,label1,label2) \
+ LXVP(vreg1+32,offset,r3); \
+ LXVP(vreg2+32,offset,r4); \
+ vcmpnezb. v7,vreg1+1,vreg2+1; \
+ bne cr6,L(label1); \
+ vcmpnezb. v7,vreg1,vreg2; \
+ bne cr6,L(label2); \
+
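+/* v7 holds the vcmpnezb result.  vctzlsbb gives the byte index of the
+   first mismatch/NUL, vextubrx extracts that byte from each source and
+   the subtraction yields strcmp's return value.  */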
+#define TAIL(vreg1,vreg2) \
+ vctzlsbb r6,v7; \
+ vextubrx r5,r6,vreg1; \
+ vextubrx r4,r6,vreg2; \
+ subf r3,r4,r5; \
+ blr; \
+
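+/* Compare exactly len_reg bytes using length-controlled loads; lxvl
+   takes the length in bits 0:7 of r0, hence the shift by 56.  Branch
+   to L(different) if a mismatch/NUL occurs within len_reg bytes, else
+   advance both pointers past the compared bytes.  */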
+#define CHECK_N_BYTES(reg1,reg2,len_reg) \
+ mr r0,len_reg; \
+ sldi r0,r0,56; \
+ lxvl 32+v4,reg1,r0; \
+ lxvl 32+v5,reg2,r0; \
+ add reg1,reg1,len_reg; \
+ add reg2,reg2,len_reg; \
+ vcmpnezb. v7,v4,v5; \
+ vctzlsbb r6,v7; \
+ cmpld cr7,r6,len_reg; \
+ blt cr7,L(different); \
+
+ /* TODO: change this to .machine power10 when the minimum required
+ binutils allows it. */
+
+ .machine power9
+ENTRY_TOCLESS (STRCMP, 4)
+ li r11,16
+ /* r12 is used as swap status flag to indicate if source pointers
+ were swapped. */
+ li r12,0
+ vspltisb v19,-1
+ andi. r7,r3,15
+ sub r7,r11,r7 /* r7(nalign1) = 16 - (str1 & 15). */
+ andi. r9,r4,15
+ sub r5,r11,r9 /* r5(nalign2) = 16 - (str2 & 15). */
+ cmpld cr7,r7,r5
+ beq cr7,L(same_aligned)
+ blt cr7,L(nalign1_min)
+ /* Swap r3 and r4 (through r8) and copy r5 to r7, so that r3 and r7
+ hold the pointer that is closer to its next 16B boundary; then
+ only one CHECK_N_BYTES is needed before entering the loop below. */
+ mr r8,r4
+ mr r4,r3
+ mr r3,r8
+ mr r7,r5
+ li r12,1 /* Set flag on swapping source pointers. */
+L(nalign1_min):
+ CHECK_N_BYTES(r3,r4,r7)
+
+L(s1_aligned):
+ li r10,4096
+ andi. r9,r4,15
+ sub r8,r10,r9 /* r8 is rem_step = page_size - (str2 & 15). */
+ rlwinm r7,r4,28,0xFF /* r7 = (str2 >> 4) & 0xFF. */
+ subfic r7,r7,255 /* r7 = 255 - ((str2 & 4095) >> 4): 16B compares to
+ run before str2 comes within 16B of its page boundary. */
+ /* The check below is needed only for the first iteration; from the
+ second iteration onwards the loop counter is always 255. */
+ cmpldi r7,0
+ ble L(L3)
+L(L1):
+ mtctr r7
+L(L2):
+ COMPARE_16(v4,v5,0) /* Load 16B blocks using lxv. */
+ addi r3,r3,16
+ addi r4,r4,16
+ bdnz L(L2)
+ /* Carefully cross the page boundary of s2. r5 and r9 are the
+ numbers of bytes to be read before and after the page boundary,
+ respectively. */
+L(L3):
+ andi. r9,r4,15
+ sub r5,r11,r9
+ CHECK_N_BYTES(r3,r4,r5)
+ CHECK_N_BYTES(r3,r4,r9)
+ li r7,255
+ b L(L1)
+
+L(same_aligned):
+ CHECK_N_BYTES(r3,r4,r7)
+ /* Compare 96B with lxv, then align s1 to 32B and adjust s2 by the
+ same amount; the last two 16B compares cover the bytes that the
+ alignment step skips over. Use lxvp only if s2 is then also
+ 32B aligned. */
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ COMPARE_16(v4,v5,48)
+ addi r3,r3,64
+ addi r4,r4,64
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+
+ clrldi r6,r3,59
+ subfic r5,r6,32
+ add r3,r3,r5
+ add r4,r4,r5
+ andi. r5,r4,0x1F
+ beq cr0,L(32B_aligned_loop)
+
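+/* s2 is not 32B aligned: use 16B lxv compares, 64B per iteration.  */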
+L(16B_aligned_loop):
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ COMPARE_16(v4,v5,48)
+ addi r3,r3,64
+ addi r4,r4,64
+ b L(16B_aligned_loop)
+
+ /* Calculate and return the difference. */
+L(different):
+ vctzlsbb r6,v7
+ vextubrx r5,r6,v4
+ vextubrx r4,r6,v5
+ cmpldi r12,1
+ beq L(swapped)
+ subf r3,r4,r5
+ blr
+
+ /* If the source pointers were swapped, subtract in the reverse
+ order to produce the correct return value. */
+L(swapped):
+ subf r3,r5,r4
+ blr
+
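+/* Both s1 and s2 are 32B aligned: main loop, comparing 128B per
+   iteration with lxvp.  */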
+L(32B_aligned_loop):
+ COMPARE_32(v14,v16,0,tail1,tail2)
+ COMPARE_32(v18,v20,32,tail3,tail4)
+ COMPARE_32(v22,v24,64,tail5,tail6)
+ COMPARE_32(v26,v28,96,tail7,tail8)
+ addi r3,r3,128
+ addi r4,r4,128
+ b L(32B_aligned_loop)
+
+L(tail1): TAIL(v15,v17)
+L(tail2): TAIL(v14,v16)
+L(tail3): TAIL(v19,v21)
+L(tail4): TAIL(v18,v20)
+L(tail5): TAIL(v23,v25)
+L(tail6): TAIL(v22,v24)
+L(tail7): TAIL(v27,v29)
+L(tail8): TAIL(v26,v28)
+
+END (STRCMP)
+libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 27d8495503..d7824a922b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -33,7 +33,8 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
ifneq (,$(filter %le,$(config-machine)))
sysdep_routines += memcmp-power10 memcpy-power10 memmove-power10 memset-power10 \
rawmemchr-power9 rawmemchr-power10 \
- strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
+ strcmp-power9 strcmp-power10 strncmp-power9 \
+ strcpy-power9 stpcpy-power9 \
strlen-power9 strncpy-power9 stpncpy-power9 strlen-power10
endif
CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index ebe9434052..ca1f57e1e2 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -376,6 +376,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c. */
IFUNC_IMPL (i, name, strcmp,
#ifdef __LITTLE_ENDIAN__
+ IFUNC_IMPL_ADD (array, i, strcmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_3_1)
+ && (hwcap & PPC_FEATURE_HAS_VSX),
+ __strcmp_power10)
IFUNC_IMPL_ADD (array, i, strcmp,
hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
new file mode 100644
index 0000000000..c80067ce33
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
@@ -0,0 +1,26 @@
+/* Optimized strcmp implementation for POWER10/PPC64.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define STRCMP __strcmp_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/strcmp.S>
+#endif /* __LITTLE_ENDIAN__ && IS_IN (libc) */
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
index 31fcdee916..f1dac99b66 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
@@ -29,12 +29,16 @@ extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
# ifdef __LITTLE_ENDIAN__
extern __typeof (strcmp) __strcmp_power9 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power10 attribute_hidden;
# endif
# undef strcmp
libc_ifunc_redirected (__redirect_strcmp, strcmp,
# ifdef __LITTLE_ENDIAN__
+ (hwcap2 & PPC_FEATURE2_ARCH_3_1
+ && hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcmp_power10 :
(hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC)
? __strcmp_power9 :
--
2.41.0