[PATCH] x86_64: Implement evex512 version of memchr, rawmemchr and wmemchr

Noah Goldstein goldstein.w.n@gmail.com
Thu Sep 22 00:50:46 GMT 2022


On Wed, Sep 21, 2022 at 5:27 PM Sunil K Pandey via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> This patch implements the following evex512 versions of string
> functions.  The evex512 versions take up to 30% fewer cycles than
> evex, depending on length and alignment.

Please attach benchmark numbers.
>
> - memchr function using 512 bit vectors.
> - rawmemchr function using 512 bit vectors.
> - wmemchr function using 512 bit vectors.
>
> Code size data:
>
> memchr-evex.o           762 bytes
> memchr-evex512.o        570 bytes (-25%)
>
> rawmemchr-evex.o        461 bytes
> rawmemchr-evex512.o     413 bytes (-10%)
>
> wmemchr-evex.o          794 bytes
> wmemchr-evex512.o       568 bytes (-28%)
>
> Placeholder function, not used by any processor at the moment.
> ---
>  sysdeps/x86_64/multiarch/Makefile            |   3 +
>  sysdeps/x86_64/multiarch/ifunc-impl-list.c   |  15 +
>  sysdeps/x86_64/multiarch/memchr-evex-base.S  | 306 +++++++++++++++++++
>  sysdeps/x86_64/multiarch/memchr-evex512.S    |   7 +
>  sysdeps/x86_64/multiarch/rawmemchr-evex512.S |   7 +
>  sysdeps/x86_64/multiarch/wmemchr-evex512.S   |   8 +
>  6 files changed, 346 insertions(+)
>  create mode 100644 sysdeps/x86_64/multiarch/memchr-evex-base.S
>  create mode 100644 sysdeps/x86_64/multiarch/memchr-evex512.S
>  create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-evex512.S
>  create mode 100644 sysdeps/x86_64/multiarch/wmemchr-evex512.S
>
> diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
> index df4601c294..e974b1ad97 100644
> --- a/sysdeps/x86_64/multiarch/Makefile
> +++ b/sysdeps/x86_64/multiarch/Makefile
> @@ -4,6 +4,7 @@ sysdep_routines += \
>    memchr-avx2 \
>    memchr-avx2-rtm \
>    memchr-evex \
> +  memchr-evex512 \
>    memchr-evex-rtm \
>    memchr-sse2 \
>    memcmp-avx2-movbe \
> @@ -36,6 +37,7 @@ sysdep_routines += \
>    rawmemchr-avx2 \
>    rawmemchr-avx2-rtm \
>    rawmemchr-evex \
> +  rawmemchr-evex512 \
>    rawmemchr-evex-rtm \
>    rawmemchr-sse2 \
>    stpcpy-avx2 \
> @@ -156,6 +158,7 @@ sysdep_routines += \
>    wmemchr-avx2 \
>    wmemchr-avx2-rtm \
>    wmemchr-evex \
> +  wmemchr-evex512 \
>    wmemchr-evex-rtm \
>    wmemchr-sse2 \
>    wmemcmp-avx2-movbe \
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index a71444eccb..17f770318d 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -63,6 +63,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __memchr_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, memchr,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)
> +                                     && CPU_FEATURE_USABLE (BMI2)),
> +                                    __memchr_evex512)
>               X86_IFUNC_IMPL_ADD_V4 (array, i, memchr,
>                                      (CPU_FEATURE_USABLE (AVX512VL)
>                                       && CPU_FEATURE_USABLE (AVX512BW)
> @@ -329,6 +334,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __rawmemchr_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, rawmemchr,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)
> +                                     && CPU_FEATURE_USABLE (BMI2)),
> +                                    __rawmemchr_evex512)
>               X86_IFUNC_IMPL_ADD_V4 (array, i, rawmemchr,
>                                      (CPU_FEATURE_USABLE (AVX512VL)
>                                       && CPU_FEATURE_USABLE (AVX512BW)
> @@ -903,6 +913,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __wmemchr_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, wmemchr,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)
> +                                     && CPU_FEATURE_USABLE (BMI2)),
> +                                    __wmemchr_evex512)
>               X86_IFUNC_IMPL_ADD_V4 (array, i, wmemchr,
>                                      (CPU_FEATURE_USABLE (AVX512VL)
>                                       && CPU_FEATURE_USABLE (AVX512BW)
> diff --git a/sysdeps/x86_64/multiarch/memchr-evex-base.S b/sysdeps/x86_64/multiarch/memchr-evex-base.S
> new file mode 100644
> index 0000000000..524f0809b5
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/memchr-evex-base.S
> @@ -0,0 +1,306 @@
> +/* Placeholder function, not used by any processor at the moment.
> +   Copyright (C) 2022 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +/* UNUSED. Exists purely as reference implementation.  */
> +
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (4)
> +
> +# include <sysdep.h>
> +
> +# ifdef USE_AS_WMEMCHR
> +#  define CHAR_SIZE    4
> +#  define VPBROADCAST   vpbroadcastd
> +#  define VPCMP                vpcmpd
> +# else
> +#  define CHAR_SIZE    1
> +#  define VPBROADCAST   vpbroadcastb
> +#  define VPCMP                vpcmpb
> +# endif
> +
> +# define PAGE_SIZE     4096
> +# define CHAR_PER_VEC  (VEC_SIZE / CHAR_SIZE)
> +# define XMM1           xmm17
> +
> +# if VEC_SIZE == 64
> +#  define KMOV         kmovq
> +#  define KOR          korq
> +#  define KORTEST      kortestq
> +#  define RAX          rax
> +#  define RCX          rcx
> +#  define SHR          shrq
> +#  define SARX         sarxq
> +#  define TEXTSUFFIX   evex512
> +#  define VMM0         zmm16
> +# elif VEC_SIZE == 32
> +/* Currently Unused.  */
> +#  define KMOV         kmovd
> +#  define KOR          kord
> +#  define KORTEST      kortestd
> +#  define RAX          eax
> +#  define RCX          ecx
> +#  define SHR          shrl
> +#  define SARX         sarxl
> +#  define TEXTSUFFIX   evex256
> +#  define VMM0         ymm16
> +# endif
> +
> +       .section .text.TEXTSUFFIX, "ax", @progbits
> +/* Aligning the entry point to 64 bytes gives better performance for
> +   strings of up to one vector length.  */
> +ENTRY_P2ALIGN (MEMCHR, 6)
> +# ifndef USE_AS_RAWMEMCHR
> +       /* Check for zero length.  */
> +       test    %RDX_LP, %RDX_LP
> +       jz      L(zero)
> +
> +#  ifdef __ILP32__
> +       /* Clear the upper 32 bits.  */
> +       movl    %edx, %edx
> +#  endif
> +# endif
> +
> +       /* Broadcast CHAR to VMM0.  */
> +       VPBROADCAST %esi, %VMM0
> +       movl    %edi, %eax
> +       andl    $(PAGE_SIZE - 1), %eax
> +       cmpl    $(PAGE_SIZE - VEC_SIZE), %eax
> +       ja      L(page_cross)
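
This is the usual page-cross guard: a full VEC_SIZE load from an
arbitrary pointer is safe only as long as it cannot touch the next,
possibly unmapped, page.  A minimal C sketch of the condition, for
illustration only:

    /* Cross if fewer than VEC_SIZE bytes remain in this page.  */
    if (((uintptr_t) s & (PAGE_SIZE - 1)) > PAGE_SIZE - VEC_SIZE)
      goto page_cross;
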
> +
> +       /* Compare each [w]char against CHAR; the mask bit is set on match.  */
> +       VPCMP   $0, (%rdi), %VMM0, %k0
> +
> +       KMOV    %k0, %RAX
> +# ifndef USE_AS_RAWMEMCHR
> +       bsf     %RAX, %RCX
> +       jz      L(align_more)
> +       xor     %eax, %eax
> +#  ifdef USE_AS_WMEMCHR
> +       leaq    (%rdi, %rcx, CHAR_SIZE), %rdi
> +#  else
> +       addq    %rcx, %rdi
> +#  endif
> +       cmp     %rcx, %rdx
> +       cmova   %rdi, %rax
> +# else
> +       bsf     %RAX, %RAX
> +       jz      L(align_more)
> +       add     %rdi, %rax
> +# endif
> +       ret
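
The cmova above gives a branchless bounded return.  With idx the bit
position from bsf and len the remaining length in chars, this tail is
roughly:

    /* The match only counts if it lies within the first len chars.  */
    return (len > idx) ? s + idx : NULL;
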
> +
> +# ifndef USE_AS_RAWMEMCHR
> +L(zero):
> +       xorl    %eax, %eax
> +       ret
> +# endif
> +
> +       .p2align 5,,5
> +L(page_cross):
> +       movq    %rdi, %rcx
> +       andq    $-VEC_SIZE, %rcx
> +
> +       VPCMP   $0, (%rcx), %VMM0, %k0
> +       KMOV    %k0, %RCX
> +       SARX    %RAX, %RCX, %RAX
> +# ifndef USE_AS_RAWMEMCHR
> +       bsf     %RAX, %RCX
> +       jz      L(align_more)
> +       xor     %eax, %eax
> +#  ifdef USE_AS_WMEMCHR
> +       leaq    (%rdi, %rcx, CHAR_SIZE), %rdi
> +#  else
> +       addq    %rcx, %rdi
> +#  endif
> +       cmp     %rcx, %rdx
> +       cmovae  %rdi, %rax
> +
> +# else
> +       bsf     %rax, %rax
> +       jz      L(align_more)
> +       add     %rdi, %rax
> +# endif
> +       ret
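
Note the shift trick here: the compare runs on the whole aligned
vector, then the match mask is shifted right by the pointer's
misalignment so that bit 0 again corresponds to the original pointer.
For the byte case, roughly (eq_mask being a hypothetical helper that
returns one mask bit per byte of the vector):

    const void *base = (void *) ((uintptr_t) s & -(uintptr_t) VEC_SIZE);
    uint64_t mask = eq_mask (base, c);
    mask >>= (uintptr_t) s & (VEC_SIZE - 1);  /* SARX takes the count mod 64.  */
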
> +
> +L(ret_vec_x2):
> +       subq    $-VEC_SIZE, %rdi
> +L(ret_vec_x1):
> +       bsf     %RAX, %RAX
> +# ifndef USE_AS_RAWMEMCHR
> +       jz      L(zero)
> +       cmp     %rax, %rdx
> +       jbe     L(zero)
> +# endif
> +# ifdef USE_AS_WMEMCHR
> +       leaq    (%rdi, %rax, CHAR_SIZE), %rax
> +# else
> +       add     %rdi, %rax
> +# endif
> +       ret
> +
> +       .p2align 5,,10
> +L(align_more):
> +# ifndef USE_AS_RAWMEMCHR
> +       xor     %eax, %eax
> +       subq    %rdi, %rax
> +# endif
> +
> +       subq    $-VEC_SIZE, %rdi
> +       /* Align rdi to VEC_SIZE.  */
> +       andq    $-VEC_SIZE, %rdi
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       addq    %rdi, %rax
> +#  ifdef USE_AS_WMEMCHR
> +       sarl    $2, %eax
> +#  endif
> +       subq    %rax, %rdx
> +       jbe     L(zero)
> +# endif
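
The negate/add pair computes how many chars the round-up to the next
VEC_SIZE boundary skips over, so they can be charged against the
remaining length.  Roughly:

    size_t skipped = ((((uintptr_t) s + VEC_SIZE) & -(uintptr_t) VEC_SIZE)
                      - (uintptr_t) s) / CHAR_SIZE;
    if (len <= skipped)
      return NULL;
    len -= skipped;
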
> +
> +       /* Check the next four vectors individually before entering the
> +          aligned 4-vector loop.  */
> +       VPCMP   $0, (%rdi), %VMM0, %k0
> +
> +       KMOV    %k0, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x1)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       VPCMP   $0, VEC_SIZE(%rdi), %VMM0, %k0
> +
> +       KMOV    %k0, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x2)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       VPCMP   $0, (VEC_SIZE * 2)(%rdi), %VMM0, %k0
> +
> +       KMOV    %k0, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x3)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       VPCMP   $0, (VEC_SIZE * 3)(%rdi), %VMM0, %k0
> +
> +       KMOV    %k0, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x4)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +       /* Save pointer to find alignment adjustment.  */
> +       movq    %rdi, %rax
> +# endif
> +       /* Align address to VEC_SIZE * 4 for loop.  */
> +       andq    $-(VEC_SIZE * 4), %rdi
> +
> +       /* Add alignment difference to rdx.  */
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    %rdi, %rax
> +#  ifdef USE_AS_WMEMCHR
> +       SHR     $2, %RAX
> +#  endif
> +       addq    %rax, %rdx
> +       jmp     L(loop_entry)
> +# endif
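
And the mirror-image fixup: aligning down to a (VEC_SIZE * 4) boundary
re-covers chars that were already searched, so the difference is
credited back to the length before entering the loop.  Roughly:

    uintptr_t aligned = (uintptr_t) p & -(4 * (uintptr_t) VEC_SIZE);
    len += ((uintptr_t) p - aligned) / CHAR_SIZE;
    p = (const char *) aligned;
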
> +
> +       /* 4 vector loop.  */
> +       .p2align 5,,11
> +L(loop):
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $(CHAR_PER_VEC * 4), %rdx
> +       jbe     L(zero)
> +L(loop_entry):
> +# endif
> +       VPCMP   $0, (VEC_SIZE * 4)(%rdi), %VMM0, %k1
> +       VPCMP   $0, (VEC_SIZE * 5)(%rdi), %VMM0, %k2
> +       VPCMP   $0, (VEC_SIZE * 6)(%rdi), %VMM0, %k3
> +       VPCMP   $0, (VEC_SIZE * 7)(%rdi), %VMM0, %k4
> +       KOR     %k1, %k2, %k5
> +       KOR     %k3, %k4, %k6
> +
> +       subq    $-(VEC_SIZE * 4), %rdi
> +       KORTEST %k5, %k6
> +       jz      L(loop)
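
For reference, the loop shape in C: four compares per iteration with
the masks OR-reduced, so the hot path pays a single kortest and branch
(the memchr build also decrements the remaining length once per trip).
Roughly, with the same hypothetical eq_mask helper:

    for (;;)
      {
        uint64_t m1 = eq_mask (p + 4 * VEC_SIZE, c);
        uint64_t m2 = eq_mask (p + 5 * VEC_SIZE, c);
        uint64_t m3 = eq_mask (p + 6 * VEC_SIZE, c);
        uint64_t m4 = eq_mask (p + 7 * VEC_SIZE, c);
        p += 4 * VEC_SIZE;
        if ((m1 | m2) | (m3 | m4))  /* KOR the pairs, then KORTEST.  */
          break;                    /* m1..m4 are then tested one by one.  */
      }
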
> +
> +       KMOV    %k1, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x1)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       KMOV    %k2, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x2)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       KMOV    %k3, %RAX
> +       test    %RAX, %RAX
> +       jnz     L(ret_vec_x3)
> +
> +# ifndef USE_AS_RAWMEMCHR
> +       subq    $CHAR_PER_VEC, %rdx
> +       jbe     L(zero)
> +# endif
> +
> +       /* At this point the match must be in the fourth vector, so there
> +          is no need to test the mask.  */
> +       KMOV    %k4, %RAX
> +
> +L(ret_vec_x4):
> +       bsf     %RAX, %RAX
> +# ifndef USE_AS_RAWMEMCHR
> +       cmp     %rax, %rdx
> +       jbe     L(zero)
> +# endif
> +       leaq    (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax
> +       ret
> +
> +       .p2align 5,,5
> +L(ret_vec_x3):
> +       bsf     %RAX, %RAX
> +# ifndef USE_AS_RAWMEMCHR
> +       cmp     %rax, %rdx
> +       jbe     L(zero)
> +# endif
> +       leaq    (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax
> +       ret
> +
> +END (MEMCHR)
> +#endif
> diff --git a/sysdeps/x86_64/multiarch/memchr-evex512.S b/sysdeps/x86_64/multiarch/memchr-evex512.S
> new file mode 100644
> index 0000000000..47349d817a
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/memchr-evex512.S
> @@ -0,0 +1,7 @@
> +# ifndef MEMCHR
> +#  define MEMCHR       __memchr_evex512
> +# endif
> +
> +#define VEC_SIZE        64
> +
> +#include "memchr-evex-base.S"
> diff --git a/sysdeps/x86_64/multiarch/rawmemchr-evex512.S b/sysdeps/x86_64/multiarch/rawmemchr-evex512.S
> new file mode 100644
> index 0000000000..302d3cb055
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/rawmemchr-evex512.S
> @@ -0,0 +1,7 @@
> +#ifndef RAWMEMCHR
> +# define RAWMEMCHR     __rawmemchr_evex512
> +#endif
> +#define USE_AS_RAWMEMCHR       1
> +#define MEMCHR RAWMEMCHR
> +
> +#include "memchr-evex512.S"
> diff --git a/sysdeps/x86_64/multiarch/wmemchr-evex512.S b/sysdeps/x86_64/multiarch/wmemchr-evex512.S
> new file mode 100644
> index 0000000000..f45ed1db75
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/wmemchr-evex512.S
> @@ -0,0 +1,8 @@
> +#ifndef WMEMCHR
> +# define WMEMCHR       __wmemchr_evex512
> +#endif
> +
> +#define MEMCHR WMEMCHR
> +#define USE_AS_WMEMCHR 1
> +
> +#include "memchr-evex512.S"
> --
> 2.36.1
>

