[PATCH 02/36] PowerPC: memcpy multilib for PowerPC32
- From: Adhemerval Zanella <azanella@linux.vnet.ibm.com>
- To: "GNU C Library" <libc-alpha@sourceware.org>
- Date: Mon, 19 Aug 2013 18:27:39 -0300
- Subject: [PATCH 02/36] PowerPC: memcpy multilib for PowerPC32
- References: <52127ABC.40008@linux.vnet.ibm.com>
2013-08-19  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>

* string/memcpy.c (memcpy): Use the MEMCPY macro so the symbol name can be redefined.
* sysdeps/powerpc/powerpc32/a2/memcpy.S: Move to ...
* sysdeps/powerpc/powerpc32/multiarch/memcpy-a2.S: ... here.
(__memcpy): Rename to __memcpy_a2 and remove
libc_hidden_builtin_def.
* sysdeps/powerpc/powerpc32/cell/memcpy.S: Move to ...
* sysdeps/powerpc/powerpc32/multiarch/memcpy-cell.S: ... here.
(__memcpy): Rename to __memcpy_cell and remove
libc_hidden_builtin_def.
* sysdeps/powerpc/powerpc32/power4/memcpy.S: Move to ...
* sysdeps/powerpc/powerpc32/multiarch/memcpy-power4.S: ... here.
(__memcpy): Rename to __memcpy_power4 and remove
libc_hidden_builtin_def.
* sysdeps/powerpc/powerpc32/power6/memcpy.S: Move to ...
* sysdeps/powerpc/powerpc32/multiarch/memcpy-power6.S: ... here.
(__memcpy): Rename to __memcpy_power6 and remove
libc_hidden_builtin_def.
* sysdeps/powerpc/powerpc32/power7/memcpy.S: Move to ...
* sysdeps/powerpc/powerpc32/multiarch/memcpy-power7.S: ... here.
(__memcpy): Rename to __memcpy_power7 and remove
libc_hidden_builtin_def.
* sysdeps/powerpc/powerpc32/multiarch/memcpy.c: New file: multiarch
memcpy selector for PPC32.
* sysdeps/powerpc/powerpc32/multiarch/memcpy-ppc32.c: New file:
default PPC32 memcpy implementation.
* sysdeps/powerpc/powerpc32/multiarch/Makefile: Add memcpy multiarch
objects.
* sysdeps/powerpc/powerpc32/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Likewise.
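
The new sysdeps/powerpc/powerpc32/multiarch/memcpy.c is not reproduced in this excerpt. As a rough sketch of the usual glibc IFUNC selector pattern it would look something like the following; this assumes the standard init-arch.h/libc_ifunc machinery and simply reuses the hwcap bits and symbol names listed in the __libc_ifunc_impl_list hunk below, so treat the exact file contents as an assumption rather than the actual patch text:

/* Sketch only -- the real memcpy.c is not part of this excerpt.
   Assumes glibc's libc_ifunc helper and the PPC hwcap bits used in
   ifunc-impl-list.c below.  */
#if defined SHARED && !defined NOT_IN_libc
# include <string.h>
# include "init-arch.h"

extern __typeof (memcpy) __memcpy_ppc32 attribute_hidden;
extern __typeof (memcpy) __memcpy_power4 attribute_hidden;
extern __typeof (memcpy) __memcpy_cell attribute_hidden;
extern __typeof (memcpy) __memcpy_power6 attribute_hidden;
extern __typeof (memcpy) __memcpy_a2 attribute_hidden;
extern __typeof (memcpy) __memcpy_power7 attribute_hidden;

/* Pick the best implementation for the running CPU; the generic
   PPC32 version is the fallback.  */
libc_ifunc (memcpy,
            (hwcap & PPC_FEATURE_HAS_VSX)   ? __memcpy_power7 :
            (hwcap & PPC_FEATURE_ARCH_2_06) ? __memcpy_a2 :
            (hwcap & PPC_FEATURE_ARCH_2_05) ? __memcpy_power6 :
            (hwcap & PPC_FEATURE_CELL_BE)   ? __memcpy_cell :
            (hwcap & PPC_FEATURE_POWER4)    ? __memcpy_power4 :
            __memcpy_ppc32);
#endif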
--
diff --git a/string/memcpy.c b/string/memcpy.c
index 3be8e35..a5b97ef 100644
--- a/string/memcpy.c
+++ b/string/memcpy.c
@@ -24,8 +24,12 @@
#undef memcpy
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
void *
-memcpy (dstpp, srcpp, len)
+MEMCPY (dstpp, srcpp, len)
void *dstpp;
const void *srcpp;
size_t len;
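
With this MEMCPY hook in place, the new memcpy-ppc32.c (also not reproduced in this excerpt) can reuse the generic C implementation under the __memcpy_ppc32 symbol. A plausible sketch follows; the exact contents are an assumption, shown only to illustrate how the macro is meant to be consumed:

/* Hypothetical sketch of memcpy-ppc32.c, not the actual file.
   Builds the generic string/memcpy.c as __memcpy_ppc32 and suppresses
   the hidden-builtin alias, which the IFUNC wrapper provides instead.  */
#include <string.h>

#define MEMCPY __memcpy_ppc32
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)

#include <string/memcpy.c>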
diff --git a/sysdeps/powerpc/powerpc32/a2/memcpy.S b/sysdeps/powerpc/powerpc32/a2/memcpy.S
deleted file mode 100644
index f2f63b1..0000000
--- a/sysdeps/powerpc/powerpc32/a2/memcpy.S
+++ /dev/null
@@ -1,527 +0,0 @@
-/* Optimized memcpy implementation for PowerPC A2.
- Copyright (C) 2010-2013 Free Software Foundation, Inc.
- Contributed by Michael Brutman <brutman@us.ibm.com>.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
-#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
-
- .machine a2
-EALIGN (memcpy, 5, 0)
- CALL_MCOUNT
-
- dcbt 0,r4 /* Prefetch ONE SRC cacheline */
- cmplwi cr1,r5,16 /* is size < 16 ? */
- mr r6,r3 /* Copy dest reg to r6; */
- blt+ cr1,L(shortcopy)
-
-
- /* Big copy (16 bytes or more)
-
- Figure out how far to the nearest quadword boundary, or if we are
- on one already.
-
- r3 - return value (always)
- r4 - current source addr
- r5 - copy length
- r6 - current dest addr
- */
-
- neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
- clrlwi r8,r8,32-4 /* align to 16byte boundary */
- sub r7,r4,r3 /* compute offset to src from dest */
- cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
- beq+ L(dst_aligned)
-
-
-
- /* Destination is not aligned on quadword boundary. Get us to one.
-
- r3 - return value (always)
- r4 - current source addr
- r5 - copy length
- r6 - current dest addr
- r7 - offset to src from dest
- r8 - number of bytes to quadword boundary
- */
-
- mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
- subf r5,r8,r5 /* adjust remaining len */
-
- bf cr7*4+3,1f
- lbzx r0,r7,r6 /* copy 1 byte addr */
- stb r0,0(r6)
- addi r6,r6,1
-1:
- bf cr7*4+2,2f
- lhzx r0,r7,r6 /* copy 2 byte addr */
- sth r0,0(r6)
- addi r6,r6,2
-2:
- bf cr7*4+1,4f
- lwzx r0,r7,r6 /* copy 4 byte addr */
- stw r0,0(r6)
- addi r6,r6,4
-4:
- bf cr7*4+0,8f
- lfdx r0,r7,r6 /* copy 8 byte addr */
- stfd r0,0(r6)
- addi r6,r6,8
-8:
- add r4,r7,r6 /* update src addr */
-
-
-
- /* Dest is quadword aligned now.
-
- Lots of decisions to make. If we are copying less than a cache
- line we won't be here long. If we are not on a cache line
- boundary we need to get there. And then we need to figure out
- how many cache lines ahead to pre-touch.
-
- r3 - return value (always)
- r4 - current source addr
- r5 - copy length
- r6 - current dest addr
- */
-
-
- .align 4
-L(dst_aligned):
-
-
-#ifdef SHARED
- mflr r0
-/* Establishes GOT addressability so we can load __cache_line_size
- from static. This value was set from the aux vector during startup. */
- SETUP_GOT_ACCESS(r9,got_label)
- addis r9,r9,__cache_line_size-got_label@ha
- lwz r9,__cache_line_size-got_label@l(r9)
- mtlr r0
-#else
-/* Load __cache_line_size from static. This value was set from the
- aux vector during startup. */
- lis r9,__cache_line_size@ha
- lwz r9,__cache_line_size@l(r9)
-#endif
-
- cmplwi cr5, r9, 0
- bne+ cr5,L(cachelineset)
-
-/* __cache_line_size not set: generic byte copy without much optimization */
- andi. r0,r5,1 /* If length is odd copy one byte. */
- beq L(cachelinenotset_align)
- lbz r7,0(r4) /* Read one byte from source. */
- addi r5,r5,-1 /* Update length. */
- addi r4,r4,1 /* Update source pointer address. */
- stb r7,0(r6) /* Store one byte on dest. */
- addi r6,r6,1 /* Update dest pointer address. */
-L(cachelinenotset_align):
- cmpwi cr7,r5,0 /* If length is 0 return. */
- beqlr cr7
- ori r2,r2,0 /* Force a new dispatch group. */
-L(cachelinenotset_loop):
- addic. r5,r5,-2 /* Update length. */
- lbz r7,0(r4) /* Load 2 bytes from source. */
- lbz r8,1(r4)
- addi r4,r4,2 /* Update source pointer address. */
- stb r7,0(r6) /* Store 2 bytes on dest. */
- stb r8,1(r6)
- addi r6,r6,2 /* Update dest pointer address. */
- bne L(cachelinenotset_loop)
- blr
-
-
-L(cachelineset):
-
- addi r10,r9,-1
-
- cmpw cr5,r5,r10 /* Less than a cacheline to go? */
-
- neg r7,r6 /* How far to next cacheline bdy? */
-
- addi r6,r6,-8 /* prepare for stdu */
- cmpwi cr0,r9,128
- addi r4,r4,-8 /* prepare for ldu */
-
-
- ble+ cr5,L(lessthancacheline)
-
- beq- cr0,L(big_lines) /* 128 byte line code */
-
-
-
-
- /* More than a cacheline left to go, and using 64 byte cachelines */
-
- clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
-
- cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
-
- /* Reduce total len by what it takes to get to the next cache line */
- subf r5,r7,r5
- srwi r7,r7,4 /* How many qws to get to the line bdy? */
-
- /* How many full cache lines to copy after getting to a line bdy? */
- srwi r10,r5,6
-
- cmplwi r10,0 /* If no full cache lines to copy ... */
- li r11,0 /* number cachelines to copy with prefetch */
- beq L(nocacheprefetch)
-
-
- /* We are here because we have at least one full cache line to copy,
- and therefore some pre-touching to do. */
-
- cmplwi r10,PREFETCH_AHEAD
- li r12,64+8 /* prefetch distance */
- ble L(lessthanmaxprefetch)
-
- /* We can only do so much pre-fetching. R11 will have the count of
- lines left to prefetch after the initial batch of prefetches
- are executed. */
-
- subi r11,r10,PREFETCH_AHEAD
- li r10,PREFETCH_AHEAD
-
-L(lessthanmaxprefetch):
- mtctr r10
-
- /* At this point r10/ctr hold the number of lines to prefetch in this
- initial batch, and r11 holds any remainder. */
-
-L(prefetchSRC):
- dcbt r12,r4
- addi r12,r12,64
- bdnz L(prefetchSRC)
-
-
- /* Prefetching is done, or was not needed.
-
- cr6 - are we on a cacheline boundary already?
- r7 - number of quadwords to the next cacheline boundary
- */
-
-L(nocacheprefetch):
- mtctr r7
-
- cmplwi cr1,r5,64 /* Less than a cache line to copy? */
-
- /* How many bytes are left after we copy whatever full
- cache lines we can get? */
- clrlwi r5,r5,32-6
-
- beq cr6,L(cachelinealigned)
-
-
- /* Copy quadwords up to the next cacheline boundary */
-
-L(aligntocacheline):
- lfd fp9,0x08(r4)
- lfdu fp10,0x10(r4)
- stfd fp9,0x08(r6)
- stfdu fp10,0x10(r6)
- bdnz L(aligntocacheline)
-
-
- .align 4
-L(cachelinealigned): /* copy while cache lines */
-
- blt- cr1,L(lessthancacheline) /* size <64 */
-
-L(outerloop):
- cmpwi r11,0
- mtctr r11
- beq- L(endloop)
-
- li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
-
- .align 4
- /* Copy whole cachelines, optimized by prefetching SRC cacheline */
-L(loop): /* Copy aligned body */
- dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
- lfd fp9, 0x08(r4)
- dcbz r11,r6
- lfd fp10, 0x10(r4)
- lfd fp11, 0x18(r4)
- lfd fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfd fp12, 0x20(r6)
- lfd fp9, 0x28(r4)
- lfd fp10, 0x30(r4)
- lfd fp11, 0x38(r4)
- lfdu fp12, 0x40(r4)
- stfd fp9, 0x28(r6)
- stfd fp10, 0x30(r6)
- stfd fp11, 0x38(r6)
- stfdu fp12, 0x40(r6)
-
- bdnz L(loop)
-
-
-L(endloop):
- cmpwi r10,0
- beq- L(endloop2)
- mtctr r10
-
-L(loop2): /* Copy aligned body */
- lfd fp9, 0x08(r4)
- lfd fp10, 0x10(r4)
- lfd fp11, 0x18(r4)
- lfd fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfd fp12, 0x20(r6)
- lfd fp9, 0x28(r4)
- lfd fp10, 0x30(r4)
- lfd fp11, 0x38(r4)
- lfdu fp12, 0x40(r4)
- stfd fp9, 0x28(r6)
- stfd fp10, 0x30(r6)
- stfd fp11, 0x38(r6)
- stfdu fp12, 0x40(r6)
-
- bdnz L(loop2)
-L(endloop2):
-
-
- .align 4
-L(lessthancacheline): /* Was there less than cache to do ? */
- cmplwi cr0,r5,16
- srwi r7,r5,4 /* divide size by 16 */
- blt- L(do_lt16)
- mtctr r7
-
-L(copy_remaining):
- lfd fp9, 0x08(r4)
- lfdu fp10, 0x10(r4)
- stfd fp9, 0x08(r6)
- stfdu fp10, 0x10(r6)
- bdnz L(copy_remaining)
-
-L(do_lt16): /* less than 16 ? */
- cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
- beqlr+ /* no rest to copy */
- addi r4,r4,8
- addi r6,r6,8
-
-L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
- mtcrf 0x01,r5
- sub r7,r4,r6
- bf- cr7*4+0,8f
- lfdx fp9,r7,r6 /* copy 8 byte */
- stfd fp9,0(r6)
- addi r6,r6,8
-8:
- bf cr7*4+1,4f
- lwzx r0,r7,r6 /* copy 4 byte */
- stw r0,0(r6)
- addi r6,r6,4
-4:
- bf cr7*4+2,2f
- lhzx r0,r7,r6 /* copy 2 byte */
- sth r0,0(r6)
- addi r6,r6,2
-2:
- bf cr7*4+3,1f
- lbzx r0,r7,r6 /* copy 1 byte */
- stb r0,0(r6)
-1:
- blr
-
-
-
-
-
- /* Similar to above, but for use with 128 byte lines. */
-
-
-L(big_lines):
-
- clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
-
- cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
-
- /* Reduce total len by what it takes to get to the next cache line */
- subf r5,r7,r5
- srwi r7,r7,4 /* How many qw to get to the line bdy? */
-
- /* How many full cache lines to copy after getting to a line bdy? */
- srwi r10,r5,7
-
- cmplwi r10,0 /* If no full cache lines to copy ... */
- li r11,0 /* number cachelines to copy with prefetch */
- beq L(nocacheprefetch_128)
-
-
- /* We are here because we have at least one full cache line to copy,
- and therefore some pre-touching to do. */
-
- cmplwi r10,PREFETCH_AHEAD
- li r12,128+8 /* prefetch distance */
- ble L(lessthanmaxprefetch_128)
-
- /* We can only do so much pre-fetching. R11 will have the count of
- lines left to prefetch after the initial batch of prefetches
- are executed. */
-
- subi r11,r10,PREFETCH_AHEAD
- li r10,PREFETCH_AHEAD
-
-L(lessthanmaxprefetch_128):
- mtctr r10
-
- /* At this point r10/ctr hold the number of lines to prefetch in this
- initial batch, and r11 holds any remainder. */
-
-L(prefetchSRC_128):
- dcbt r12,r4
- addi r12,r12,128
- bdnz L(prefetchSRC_128)
-
-
- /* Prefetching is done, or was not needed.
-
- cr6 - are we on a cacheline boundary already?
- r7 - number of quadwords to the next cacheline boundary
- */
-
-L(nocacheprefetch_128):
- mtctr r7
-
- cmplwi cr1,r5,128 /* Less than a cache line to copy? */
-
- /* How many bytes are left after we copy whatever full
- cache lines we can get? */
- clrlwi r5,r5,32-7
-
- beq cr6,L(cachelinealigned_128)
-
-
- /* Copy quadwords up to the next cacheline boundary */
-
-L(aligntocacheline_128):
- lfd fp9,0x08(r4)
- lfdu fp10,0x10(r4)
- stfd fp9,0x08(r6)
- stfdu fp10,0x10(r6)
- bdnz L(aligntocacheline_128)
-
-
-L(cachelinealigned_128): /* copy while cache lines */
-
- blt- cr1,L(lessthancacheline) /* size <128 */
-
-L(outerloop_128):
- cmpwi r11,0
- mtctr r11
- beq- L(endloop_128)
-
- li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
-
- .align 4
- /* Copy whole cachelines, optimized by prefetching SRC cacheline */
-L(loop_128): /* Copy aligned body */
- dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
- lfd fp9, 0x08(r4)
- dcbz r11,r6
- lfd fp10, 0x10(r4)
- lfd fp11, 0x18(r4)
- lfd fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfd fp12, 0x20(r6)
- lfd fp9, 0x28(r4)
- lfd fp10, 0x30(r4)
- lfd fp11, 0x38(r4)
- lfd fp12, 0x40(r4)
- stfd fp9, 0x28(r6)
- stfd fp10, 0x30(r6)
- stfd fp11, 0x38(r6)
- stfd fp12, 0x40(r6)
- lfd fp9, 0x48(r4)
- lfd fp10, 0x50(r4)
- lfd fp11, 0x58(r4)
- lfd fp12, 0x60(r4)
- stfd fp9, 0x48(r6)
- stfd fp10, 0x50(r6)
- stfd fp11, 0x58(r6)
- stfd fp12, 0x60(r6)
- lfd fp9, 0x68(r4)
- lfd fp10, 0x70(r4)
- lfd fp11, 0x78(r4)
- lfdu fp12, 0x80(r4)
- stfd fp9, 0x68(r6)
- stfd fp10, 0x70(r6)
- stfd fp11, 0x78(r6)
- stfdu fp12, 0x80(r6)
-
- bdnz L(loop_128)
-
-
-L(endloop_128):
- cmpwi r10,0
- beq- L(endloop2_128)
- mtctr r10
-
-L(loop2_128): /* Copy aligned body */
- lfd fp9, 0x08(r4)
- lfd fp10, 0x10(r4)
- lfd fp11, 0x18(r4)
- lfd fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfd fp12, 0x20(r6)
- lfd fp9, 0x28(r4)
- lfd fp10, 0x30(r4)
- lfd fp11, 0x38(r4)
- lfd fp12, 0x40(r4)
- stfd fp9, 0x28(r6)
- stfd fp10, 0x30(r6)
- stfd fp11, 0x38(r6)
- stfd fp12, 0x40(r6)
- lfd fp9, 0x48(r4)
- lfd fp10, 0x50(r4)
- lfd fp11, 0x58(r4)
- lfd fp12, 0x60(r4)
- stfd fp9, 0x48(r6)
- stfd fp10, 0x50(r6)
- stfd fp11, 0x58(r6)
- stfd fp12, 0x60(r6)
- lfd fp9, 0x68(r4)
- lfd fp10, 0x70(r4)
- lfd fp11, 0x78(r4)
- lfdu fp12, 0x80(r4)
- stfd fp9, 0x68(r6)
- stfd fp10, 0x70(r6)
- stfd fp11, 0x78(r6)
- stfdu fp12, 0x80(r6)
- bdnz L(loop2_128)
-L(endloop2_128):
-
- b L(lessthancacheline)
-
-
-END (memcpy)
-libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc32/cell/memcpy.S b/sysdeps/powerpc/powerpc32/cell/memcpy.S
deleted file mode 100644
index f3605d7..0000000
--- a/sysdeps/powerpc/powerpc32/cell/memcpy.S
+++ /dev/null
@@ -1,242 +0,0 @@
-/* Optimized memcpy implementation for CELL BE PowerPC.
- Copyright (C) 2010-2013 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-#define PREFETCH_AHEAD 6 /* no cache lines SRC prefetching ahead */
-#define ZERO_AHEAD 4 /* no cache lines DST zeroing ahead */
-
-/* memcpy routine optimized for CELL-BE-PPC v2.0
- *
- * The CELL PPC core has 1 integer unit and 1 load/store unit
- * CELL:
- * 1st level data cache = 32K
- * 2nd level data cache = 512K
- * 3rd level data cache = 0K
- * With 3.2 GHz clockrate the latency to 2nd level cache is >36 clocks,
- * latency to memory is >400 clocks
- * To improve copy performance we need to prefetch source data
- * far ahead to hide this latency
- * For best performance instruction forms ending in "." like "andi."
- * should be avoided as the are implemented in microcode on CELL.
- * The below code is loop unrolled for the CELL cache line of 128 bytes
- */
-
-.align 7
-
-EALIGN (memcpy, 5, 0)
- CALL_MCOUNT
-
- dcbt 0,r4 /* Prefetch ONE SRC cacheline */
- cmplwi cr1,r5,16 /* is size < 16 ? */
- mr r6,r3
- blt+ cr1,.Lshortcopy
-
-.Lbigcopy:
- neg r8,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
- clrlwi r8,r8,32-4 /* aling to 16byte boundary */
- sub r7,r4,r3
- cmplwi cr0,r8,0
- beq+ .Ldst_aligned
-
-.Ldst_unaligned:
- mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
- subf r5,r8,r5
-
- bf cr7*4+3,1f
- lbzx r0,r7,r6 /* copy 1 byte */
- stb r0,0(r6)
- addi r6,r6,1
-1: bf cr7*4+2,2f
- lhzx r0,r7,r6 /* copy 2 byte */
- sth r0,0(r6)
- addi r6,r6,2
-2: bf cr7*4+1,4f
- lwzx r0,r7,r6 /* copy 4 byte */
- stw r0,0(r6)
- addi r6,r6,4
-4: bf cr7*4+0,8f
- lfdx fp9,r7,r6 /* copy 8 byte */
- stfd fp9,0(r6)
- addi r6,r6,8
-8:
- add r4,r7,r6
-
-.Ldst_aligned:
-
- cmpwi cr5,r5,128-1
-
- neg r7,r6
- addi r6,r6,-8 /* prepare for stfdu */
- addi r4,r4,-8 /* prepare for lfdu */
-
- clrlwi r7,r7,32-7 /* align to cacheline boundary */
- ble+ cr5,.Llessthancacheline
-
- cmplwi cr6,r7,0
- subf r5,r7,r5
- srwi r7,r7,4 /* divide size by 16 */
- srwi r10,r5,7 /* number of cache lines to copy */
-
- cmplwi r10,0
- li r11,0 /* number cachelines to copy with prefetch */
- beq .Lnocacheprefetch
-
- cmplwi r10,PREFETCH_AHEAD
- li r12,128+8 /* prefetch distance */
- ble .Llessthanmaxprefetch
-
- subi r11,r10,PREFETCH_AHEAD
- li r10,PREFETCH_AHEAD
-
-.Llessthanmaxprefetch:
- mtctr r10
-
-.LprefetchSRC:
- dcbt r12,r4
- addi r12,r12,128
- bdnz .LprefetchSRC
-
-.Lnocacheprefetch:
- mtctr r7
- cmplwi cr1,r5,128
- clrlwi r5,r5,32-7
- beq cr6,.Lcachelinealigned
-
-.Laligntocacheline:
- lfd fp9,0x08(r4)
- lfdu fp10,0x10(r4)
- stfd fp9,0x08(r6)
- stfdu fp10,0x10(r6)
- bdnz .Laligntocacheline
-
-
-.Lcachelinealigned: /* copy while cache lines */
-
- blt- cr1,.Llessthancacheline /* size <128 */
-
-.Louterloop:
- cmpwi r11,0
- mtctr r11
- beq- .Lendloop
-
- li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
-
-.align 4
- /* Copy whole cachelines, optimized by prefetching SRC cacheline */
-.Lloop: /* Copy aligned body */
- dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
- lfd fp9, 0x08(r4)
- dcbz r11,r6
- lfd fp10, 0x10(r4) /* 4 register stride copy is optimal */
- lfd fp11, 0x18(r4) /* to hide 1st level cache latency. */
- lfd fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfd fp12, 0x20(r6)
- lfd fp9, 0x28(r4)
- lfd fp10, 0x30(r4)
- lfd fp11, 0x38(r4)
- lfd fp12, 0x40(r4)
- stfd fp9, 0x28(r6)
- stfd fp10, 0x30(r6)
- stfd fp11, 0x38(r6)
- stfd fp12, 0x40(r6)
- lfd fp9, 0x48(r4)
- lfd fp10, 0x50(r4)
- lfd fp11, 0x58(r4)
- lfd fp12, 0x60(r4)
- stfd fp9, 0x48(r6)
- stfd fp10, 0x50(r6)
- stfd fp11, 0x58(r6)
- stfd fp12, 0x60(r6)
- lfd fp9, 0x68(r4)
- lfd fp10, 0x70(r4)
- lfd fp11, 0x78(r4)
- lfdu fp12, 0x80(r4)
- stfd fp9, 0x68(r6)
- stfd fp10, 0x70(r6)
- stfd fp11, 0x78(r6)
- stfdu fp12, 0x80(r6)
-
- bdnz .Lloop
-
-.Lendloop:
- cmpwi r10,0
- slwi r10,r10,2 /* adjust from 128 to 32 byte stride */
- beq- .Lendloop2
- mtctr r10
-
-.Lloop2: /* Copy aligned body */
- lfd fp9, 0x08(r4)
- lfd fp10, 0x10(r4)
- lfd fp11, 0x18(r4)
- lfdu fp12, 0x20(r4)
- stfd fp9, 0x08(r6)
- stfd fp10, 0x10(r6)
- stfd fp11, 0x18(r6)
- stfdu fp12, 0x20(r6)
-
- bdnz .Lloop2
-.Lendloop2:
-
-.Llessthancacheline: /* less than cache to do ? */
- cmplwi cr0,r5,16
- srwi r7,r5,4 /* divide size by 16 */
- blt- .Ldo_lt16
- mtctr r7
-
-.Lcopy_remaining:
- lfd fp9,0x08(r4)
- lfdu fp10,0x10(r4)
- stfd fp9,0x08(r6)
- stfdu fp10,0x10(r6)
- bdnz .Lcopy_remaining
-
-.Ldo_lt16: /* less than 16 ? */
- cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
- beqlr+ /* no rest to copy */
- addi r4,r4,8
- addi r6,r6,8
-
-.Lshortcopy: /* SIMPLE COPY to handle size =< 15 bytes */
- mtcrf 0x01,r5
- sub r7,r4,r6
- bf- cr7*4+0,8f
- lfdx fp9,r7,r6 /* copy 8 byte */
- stfd fp9,0(r6)
- addi r6,r6,8
-8:
- bf cr7*4+1,4f
- lwzx r0,r7,r6 /* copy 4 byte */
- stw r0,0(r6)
- addi r6,r6,4
-4:
- bf cr7*4+2,2f
- lhzx r0,r7,r6 /* copy 2 byte */
- sth r0,0(r6)
- addi r6,r6,2
-2:
- bf cr7*4+3,1f
- lbzx r0,r7,r6 /* copy 1 byte */
- stb r0,0(r6)
-1: blr
-
-END (memcpy)
-libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc32/multiarch/Makefile b/sysdeps/powerpc/powerpc32/multiarch/Makefile
index af8d496..d8d84f5 100644
--- a/sysdeps/powerpc/powerpc32/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc32/multiarch/Makefile
@@ -1,3 +1,4 @@
ifeq ($(subdir),string)
-sysdep_routines +=
+sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
+ memcpy-power4 memcpy-ppc32
endif
diff --git a/sysdeps/powerpc/powerpc32/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc32/multiarch/ifunc-impl-list.c
index f4e55ae..3bf0447 100644
--- a/sysdeps/powerpc/powerpc32/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc32/multiarch/ifunc-impl-list.c
@@ -46,5 +46,21 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
else if (hwcap & PPC_FEATURE_POWER5)
hwcap |= PPC_FEATURE_POWER4;
+#ifdef SHARED
+ /* Support sysdeps/powerpc/powerpc32/multiarch/memcpy.c. */
+ IFUNC_IMPL (i, name, memcpy,
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
+ __memcpy_power7)
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_ARCH_2_06,
+ __memcpy_a2)
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_ARCH_2_05,
+ __memcpy_power6)
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_CELL_BE,
+ __memcpy_cell)
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_POWER4,
+ __memcpy_power4)
+ IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_ppc32))
+#endif
+
return i;
}
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-a2.S b/sysdeps/powerpc/powerpc32/multiarch/memcpy-a2.S
new file mode 100644
index 0000000..67e0f1b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-a2.S
@@ -0,0 +1,526 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010-2013 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
+
+ .machine a2
+EALIGN (__memcpy_a2, 5, 0)
+ CALL_MCOUNT
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmplwi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
+ clrlwi r8,r8,32-4 /* align to 16byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ lfdx r0,r7,r6 /* copy 8 byte addr */
+ stfd r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+#ifdef SHARED
+ mflr r0
+/* Establishes GOT addressability so we can load __cache_line_size
+ from static. This value was set from the aux vector during startup. */
+ SETUP_GOT_ACCESS(r9,got_label)
+ addis r9,r9,__cache_line_size-got_label@ha
+ lwz r9,__cache_line_size-got_label@l(r9)
+ mtlr r0
+#else
+/* Load __cache_line_size from static. This value was set from the
+ aux vector during startup. */
+ lis r9,__cache_line_size@ha
+ lwz r9,__cache_line_size@l(r9)
+#endif
+
+ cmplwi cr5, r9, 0
+ bne+ cr5,L(cachelineset)
+
+/* __cache_line_size not set: generic byte copy without much optimization */
+ andi. r0,r5,1 /* If length is odd copy one byte. */
+ beq L(cachelinenotset_align)
+ lbz r7,0(r4) /* Read one byte from source. */
+ addi r5,r5,-1 /* Update length. */
+ addi r4,r4,1 /* Update source pointer address. */
+ stb r7,0(r6) /* Store one byte on dest. */
+ addi r6,r6,1 /* Update dest pointer address. */
+L(cachelinenotset_align):
+ cmpwi cr7,r5,0 /* If length is 0 return. */
+ beqlr cr7
+ ori r2,r2,0 /* Force a new dispatch group. */
+L(cachelinenotset_loop):
+ addic. r5,r5,-2 /* Update length. */
+ lbz r7,0(r4) /* Load 2 bytes from source. */
+ lbz r8,1(r4)
+ addi r4,r4,2 /* Update source pointer address. */
+ stb r7,0(r6) /* Store 2 bytes on dest. */
+ stb r8,1(r6)
+ addi r6,r6,2 /* Update dest pointer address. */
+ bne L(cachelinenotset_loop)
+ blr
+
+
+L(cachelineset):
+
+ addi r10,r9,-1
+
+ cmpw cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpwi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,6
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmplwi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpwi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than cache to do ? */
+ cmplwi cr0,r5,16
+ srwi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ lfd fp9, 0x08(r4)
+ lfdu fp10, 0x10(r4)
+ stfd fp9, 0x08(r6)
+ stfdu fp10, 0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qw to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,7
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmplwi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpwi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END (__memcpy_a2)
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-cell.S b/sysdeps/powerpc/powerpc32/multiarch/memcpy-cell.S
new file mode 100644
index 0000000..8b0bf40
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-cell.S
@@ -0,0 +1,241 @@
+/* Optimized memcpy implementation for CELL BE PowerPC.
+ Copyright (C) 2010-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#define PREFETCH_AHEAD 6 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 4 /* no cache lines DST zeroing ahead */
+
+/* memcpy routine optimized for CELL-BE-PPC v2.0
+ *
+ * The CELL PPC core has 1 integer unit and 1 load/store unit
+ * CELL:
+ * 1st level data cache = 32K
+ * 2nd level data cache = 512K
+ * 3rd level data cache = 0K
+ * With 3.2 GHz clockrate the latency to 2nd level cache is >36 clocks,
+ * latency to memory is >400 clocks
+ * To improve copy performance we need to prefetch source data
+ * far ahead to hide this latency
+ * For best performance instruction forms ending in "." like "andi."
+ * should be avoided as the are implemented in microcode on CELL.
+ * The below code is loop unrolled for the CELL cache line of 128 bytes
+ */
+
+.align 7
+
+EALIGN (__memcpy_cell, 5, 0)
+ CALL_MCOUNT
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmplwi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3
+ blt+ cr1,.Lshortcopy
+
+.Lbigcopy:
+ neg r8,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
+ clrlwi r8,r8,32-4 /* aling to 16byte boundary */
+ sub r7,r4,r3
+ cmplwi cr0,r8,0
+ beq+ .Ldst_aligned
+
+.Ldst_unaligned:
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+ addi r6,r6,1
+1: bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2: bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4: bf cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6
+
+.Ldst_aligned:
+
+ cmpwi cr5,r5,128-1
+
+ neg r7,r6
+ addi r6,r6,-8 /* prepare for stfdu */
+ addi r4,r4,-8 /* prepare for lfdu */
+
+ clrlwi r7,r7,32-7 /* align to cacheline boundary */
+ ble+ cr5,.Llessthancacheline
+
+ cmplwi cr6,r7,0
+ subf r5,r7,r5
+ srwi r7,r7,4 /* divide size by 16 */
+ srwi r10,r5,7 /* number of cache lines to copy */
+
+ cmplwi r10,0
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq .Lnocacheprefetch
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble .Llessthanmaxprefetch
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+.Llessthanmaxprefetch:
+ mtctr r10
+
+.LprefetchSRC:
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz .LprefetchSRC
+
+.Lnocacheprefetch:
+ mtctr r7
+ cmplwi cr1,r5,128
+ clrlwi r5,r5,32-7
+ beq cr6,.Lcachelinealigned
+
+.Laligntocacheline:
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz .Laligntocacheline
+
+
+.Lcachelinealigned: /* copy while cache lines */
+
+ blt- cr1,.Llessthancacheline /* size <128 */
+
+.Louterloop:
+ cmpwi r11,0
+ mtctr r11
+ beq- .Lendloop
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+.align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+.Lloop: /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4) /* 4 register stride copy is optimal */
+ lfd fp11, 0x18(r4) /* to hide 1st level cache latency. */
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+
+ bdnz .Lloop
+
+.Lendloop:
+ cmpwi r10,0
+ slwi r10,r10,2 /* adjust from 128 to 32 byte stride */
+ beq- .Lendloop2
+ mtctr r10
+
+.Lloop2: /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfdu fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfdu fp12, 0x20(r6)
+
+ bdnz .Lloop2
+.Lendloop2:
+
+.Llessthancacheline: /* less than cache to do ? */
+ cmplwi cr0,r5,16
+ srwi r7,r5,4 /* divide size by 16 */
+ blt- .Ldo_lt16
+ mtctr r7
+
+.Lcopy_remaining:
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz .Lcopy_remaining
+
+.Ldo_lt16: /* less than 16 ? */
+ cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+.Lshortcopy: /* SIMPLE COPY to handle size =< 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1: blr
+
+END (__memcpy_cell)
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-power4.S b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power4.S
new file mode 100644
index 0000000..a9b4e6c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power4.S
@@ -0,0 +1,421 @@
+/* Optimized memcpy implementation for PowerPC32.
+ Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst'.
+
+ Memcpy handles short copies (< 32-bytes) using a binary move blocks
+ (no loops) of lwz/stw. The tail (remaining 1-3) bytes is handled
+ with the appropriate combination of byte and halfword load/stores.
+ There is minimal effort to optimize the alignment of short moves.
+
+ Longer moves (>= 32-bytes) justify the effort to get at least the
+ destination word (4-byte) aligned. Further optimization is
+ possible when both source and destination are word aligned.
+ Each case has an optimized unrolled loop. */
+
+ .machine power4
+EALIGN (__memcpy_power4, 5, 0)
+ CALL_MCOUNT
+
+ stwu 1,-32(1)
+ cfi_adjust_cfa_offset(32)
+ stw 30,20(1)
+ cfi_offset(30,(20-32))
+ mr 30,3
+ cmplwi cr1,5,31
+ stw 31,24(1)
+ cfi_offset(31,(24-32))
+ neg 0,3
+ andi. 11,3,3 /* check alignment of dst. */
+ clrlwi 0,0,30 /* Number of bytes until the 1st word of dst. */
+ clrlwi 10,4,30 /* check alignment of src. */
+ cmplwi cr6,5,8
+ ble- cr1,.L2 /* If move < 32 bytes use short move code. */
+ cmplw cr6,10,11
+ mr 12,4
+ srwi 9,5,2 /* Number of full words remaining. */
+ mtcrf 0x01,0
+ mr 31,5
+ beq .L0
+
+ subf 31,0,5
+ /* Move 0-3 bytes as needed to get the destination word aligned. */
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,0f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+0:
+ clrlwi 10,12,30 /* check alignment of src again. */
+ srwi 9,31,2 /* Number of full words remaining. */
+
+ /* Copy words from source to destination, assuming the destination is
+ aligned on a word boundary.
+
+ At this point we know there are at least 25 bytes left (32-7) to copy.
+ The next step is to determine if the source is also word aligned.
+ If not branch to the unaligned move code at .L6. which uses
+ a load, shift, store strategy.
+
+ Otherwise source and destination are word aligned, and we can use
+ the optimized word copy loop. */
+.L0:
+ clrlwi 11,31,30 /* calculate the number of tail bytes */
+ mtcrf 0x01,9
+ bne- cr6,.L6 /* If source is not word aligned. */
+
+ /* Move words where destination and source are word aligned.
+ Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+ If the copy is not an exact multiple of 16 bytes, 1-3
+ words are copied as needed to set up the main loop. After
+ the main loop exits there may be a tail of 1-3 bytes. These bytes are
+ copied a halfword/byte at a time as needed to preserve alignment. */
+
+ srwi 8,31,4 /* calculate the 16 byte loop count */
+ cmplwi cr1,9,4
+ cmplwi cr6,11,0
+ mr 11,12
+
+ bf 30,1f
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 11,12,8
+ mtctr 8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 10,3,8
+ bf 31,4f
+ lwz 0,8(12)
+ stw 0,8(3)
+ blt cr1,3f
+ addi 11,12,12
+ addi 10,3,12
+ b 4f
+ .align 4
+1:
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ lwz 6,0(12)
+ addi 11,12,4
+ stw 6,0(3)
+ addi 10,3,4
+
+ .align 4
+4:
+ lwz 6,0(11)
+ lwz 7,4(11)
+ lwz 8,8(11)
+ lwz 0,12(11)
+ stw 6,0(10)
+ stw 7,4(10)
+ stw 8,8(10)
+ stw 0,12(10)
+ addi 11,11,16
+ addi 10,10,16
+ bdnz 4b
+3:
+ clrrwi 0,31,2
+ mtcrf 0x01,31
+ beq cr6,0f
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+/* At this point we have a tail of 0-3 bytes and we know that the
+ destination is word aligned. */
+2: bf 30,1f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,0f
+ lbz 6,0(12)
+ stb 6,0(3)
+0:
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+/* Copy up to 31 bytes. This is divided into two cases 0-8 bytes and
+ 9-31 bytes. Each case is handled without loops, using binary
+ (1,2,4,8) tests.
+
+ In the short (0-8 byte) case no attempt is made to force alignment
+ of either source or destination. The hardware will handle the
+ unaligned load/stores with small delays for crossing 32- 64-byte, and
+ 4096-byte boundaries. Since these short moves are unlikely to be
+ unaligned or cross these boundaries, the overhead to force
+ alignment is not justified.
+
+ The longer (9-31 byte) move is more likely to cross 32- or 64-byte
+ boundaries. Since only loads are sensitive to the 32-/64-byte
+ boundaries it is more important to align the source than the
+ destination. If the source is not already word aligned, we first
+ move 1-3 bytes as needed. While the destination and stores may
+ still be unaligned, this is only an issue for page (4096 byte
+ boundary) crossing, which should be rare for these short moves.
+ The hardware handles this case automatically with a small delay. */
+
+ .align 4
+.L2:
+ mtcrf 0x01,5
+ neg 8,4
+ clrrwi 11,4,2
+ andi. 0,8,3
+ ble cr6,.LE8 /* Handle moves of 0-8 bytes. */
+/* At least 9 bytes left. Get the source word aligned. */
+ cmplwi cr1,5,16
+ mr 10,5
+ mr 12,4
+ cmplwi cr6,0,2
+ beq .L3 /* If the source is already word aligned skip this. */
+/* Copy 1-3 bytes to get source address word aligned. */
+ lwz 6,0(11)
+ subf 10,0,5
+ add 12,4,0
+ blt cr6,5f
+ srwi 7,6,16
+ bgt cr6,3f
+ sth 6,0(3)
+ b 7f
+ .align 4
+3:
+ stb 7,0(3)
+ sth 6,1(3)
+ b 7f
+ .align 4
+5:
+ stb 6,0(3)
+7:
+ cmplwi cr1,10,16
+ add 3,3,0
+ mtcrf 0x01,10
+ .align 4
+.L3:
+/* At least 6 bytes left and the source is word aligned. */
+ blt cr1,8f
+16: /* Move 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 6,8(12)
+ stw 7,4(3)
+ lwz 7,12(12)
+ addi 12,12,16
+ stw 6,8(3)
+ stw 7,12(3)
+ addi 3,3,16
+8: /* Move 8 bytes. */
+ bf 28,4f
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Move 4 bytes. */
+ bf 29,2f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Move 2-3 bytes. */
+ bf 30,1f
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+1: /* Move 1 byte. */
+ bf 31,0f
+ lbz 6,0(12)
+ stb 6,0(3)
+0:
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+/* Special case to copy 0-8 bytes. */
+ .align 4
+.LE8:
+ mr 12,4
+ bne cr6,4f
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+ .align 4
+4: bf 29,2b
+ lwz 6,0(4)
+ stw 6,0(3)
+6:
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+ .align 4
+5:
+ bf 31,0f
+ lbz 6,4(4)
+ stb 6,4(3)
+ .align 4
+0:
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+.L6:
+
+ /* Copy words where the destination is aligned but the source is
+ not. Use aligned word loads from the source, shifted to realign
+ the data, to allow aligned destination stores.
+ Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+ A single word is retained for storing at loop exit to avoid walking
+ off the end of a page within the loop.
+ If the copy is not an exact multiple of 16 bytes, 1-3
+ words are copied as needed to set up the main loop. After
+ the main loop exits there may be a tail of 1-3 bytes. These bytes are
+ copied a halfword/byte at a time as needed to preserve alignment. */
+
+
+ cmplwi cr6,11,0 /* are there tail bytes left ? */
+ subf 5,10,12 /* back up src pointer to prev word alignment */
+ slwi 10,10,3 /* calculate number of bits to shift 1st word left */
+ addi 11,9,-1 /* we move one word after the loop */
+ srwi 8,11,2 /* calculate the 16 byte loop count */
+ lwz 6,0(5) /* load 1st src word into R6 */
+ mr 4,3
+ lwz 7,4(5) /* load 2nd src word into R7 */
+ mtcrf 0x01,11
+ subfic 9,10,32 /* number of bits to shift 2nd word right */
+ mtctr 8
+ bf 30,1f
+
+ /* there are at least two words to copy, so copy them */
+ slw 0,6,10 /* shift 1st src word to left align it in R0 */
+ srw 8,7,9 /* shift 2nd src word to right align it in R8 */
+ or 0,0,8 /* or them to get word to store */
+ lwz 6,8(5) /* load the 3rd src word */
+ stw 0,0(4) /* store the 1st dst word */
+ slw 0,7,10 /* now left align 2nd src word into R0 */
+ srw 8,6,9 /* shift 3rd src word to right align it in R8 */
+ or 0,0,8 /* or them to get word to store */
+ lwz 7,12(5)
+ stw 0,4(4) /* store the 2nd dst word */
+ addi 4,4,8
+ addi 5,5,16
+ bf 31,4f
+ /* there is a third word to copy, so copy it */
+ slw 0,6,10 /* shift 3rd src word to left align it in R0 */
+ srw 8,7,9 /* shift 4th src word to right align it in R8 */
+ or 0,0,8 /* or them to get word to store */
+ stw 0,0(4) /* store 3rd dst word */
+ mr 6,7
+ lwz 7,0(5)
+ addi 5,5,4
+ addi 4,4,4
+ b 4f
+ .align 4
+1:
+ slw 0,6,10 /* shift 1st src word to left align it in R0 */
+ srw 8,7,9 /* shift 2nd src word to right align it in R8 */
+ addi 5,5,8
+ or 0,0,8 /* or them to get word to store */
+ bf 31,4f
+ mr 6,7
+ lwz 7,0(5)
+ addi 5,5,4
+ stw 0,0(4) /* store the 1st dst word */
+ addi 4,4,4
+
+ .align 4
+4:
+ /* copy 16 bytes at a time */
+ slw 0,6,10
+ srw 8,7,9
+ or 0,0,8
+ lwz 6,0(5)
+ stw 0,0(4)
+ slw 0,7,10
+ srw 8,6,9
+ or 0,0,8
+ lwz 7,4(5)
+ stw 0,4(4)
+ slw 0,6,10
+ srw 8,7,9
+ or 0,0,8
+ lwz 6,8(5)
+ stw 0,8(4)
+ slw 0,7,10
+ srw 8,6,9
+ or 0,0,8
+ lwz 7,12(5)
+ stw 0,12(4)
+ addi 5,5,16
+ addi 4,4,16
+ bdnz+ 4b
+8:
+ /* calculate and store the final word */
+ slw 0,6,10
+ srw 8,7,9
+ or 0,0,8
+ stw 0,0(4)
+3:
+ clrrwi 0,31,2
+ mtcrf 0x01,31
+ bne cr6,.L9 /* If the tail is 0 bytes we are done! */
+
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+END (__memcpy_power4)
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-power6.S b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power6.S
new file mode 100644
index 0000000..4e39046
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power6.S
@@ -0,0 +1,838 @@
+/* Optimized memcpy implementation for PowerPC32 on POWER6.
+ Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst'.
+
+ Memcpy handles short copies (< 32-bytes) using a binary move blocks
+ (no loops) of lwz/stw. The tail (remaining 1-3) bytes is handled
+ with the appropriate combination of byte and halfword load/stores.
+ There is minimal effort to optimize the alignment of short moves.
+
+ Longer moves (>= 32-bytes) justify the effort to get at least the
+ destination word (4-byte) aligned. Further optimization is
+ possible when both source and destination are word aligned.
+ Each case has an optimized unrolled loop. */
+
+ .machine power6
+EALIGN (__memcpy_power6, 5, 0)
+ CALL_MCOUNT
+
+ stwu 1,-32(1)
+ cfi_adjust_cfa_offset(32)
+ cmplwi cr1,5,31 /* check for short move. */
+ neg 0,3
+ cmplwi cr1,5,31
+ clrlwi 10,4,30 /* check alignment of src. */
+ andi. 11,3,3 /* check alignment of dst. */
+ clrlwi 0,0,30 /* Number of bytes until the 1st word of dst. */
+ ble- cr1,L(word_unaligned_short) /* If move < 32 bytes. */
+ cmplw cr6,10,11
+ stw 31,24(1)
+ cfi_offset(31,(24-32))
+ stw 30,20(1)
+ cfi_offset(30,(20-32))
+ mr 30,3
+ beq .L0
+ mtcrf 0x01,0
+ subf 31,0,5 /* Length after alignment. */
+ add 12,4,0 /* Compute src addr after alignment. */
+ /* Move 0-3 bytes as needed to get the destination word aligned. */
+1: bf 31,2f
+ lbz 6,0(4)
+ bf 30,3f
+ lhz 7,1(4)
+ stb 6,0(3)
+ sth 7,1(3)
+ addi 3,3,3
+ b 0f
+3:
+ stb 6,0(3)
+ addi 3,3,1
+ b 0f
+2: bf 30,0f
+ lhz 6,0(4)
+ sth 6,0(3)
+ addi 3,3,2
+0:
+ clrlwi 10,12,30 /* check alignment of src again. */
+ srwi 9,31,2 /* Number of full words remaining. */
+ bne- cr6,L(wdu) /* If source is not word aligned. .L6 */
+ clrlwi 11,31,30 /* calculate the number of tail bytes */
+ b L(word_aligned)
+ /* Copy words from source to destination, assuming the destination is
+ aligned on a word boundary.
+
+ At this point we know there are at least 29 bytes left (32-3) to copy.
+ The next step is to determine if the source is also word aligned.
+ If not branch to the unaligned move code at .L6. which uses
+ a load, shift, store strategy.
+
+ Otherwise source and destination are word aligned, and we can use
+ the optimized word copy loop. */
+ .align 4
+.L0:
+ mr 31,5
+ mr 12,4
+ bne- cr6,L(wdu) /* If source is not word aligned. .L6 */
+ srwi 9,5,2 /* Number of full words remaining. */
+ clrlwi 11,5,30 /* calculate the number of tail bytes */
+
+ /* Move words where destination and source are word aligned.
+ Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+ If the copy is not an exact multiple of 16 bytes, 1-3
+ words are copied as needed to set up the main loop. After
+ the main loop exits there may be a tail of 1-3 bytes. These bytes are
+ copied a halfword/byte at a time as needed to preserve alignment. */
+L(word_aligned):
+ mtcrf 0x01,9
+ srwi 8,31,4 /* calculate the 16 byte loop count */
+ cmplwi cr1,9,4
+ cmplwi cr6,11,0
+ mr 11,12
+
+ bf 30,1f
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 11,12,8
+ mtctr 8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 10,3,8
+ bf 31,4f
+ lwz 0,8(12)
+ stw 0,8(3)
+ blt cr1,3f
+ addi 11,12,12
+ addi 10,3,12
+ b 4f
+ .align 4
+1:
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ lwz 6,0(12)
+ addi 11,12,4
+ stw 6,0(3)
+ addi 10,3,4
+
+ .align 4
+4:
+ lwz 6,0(11)
+ lwz 7,4(11)
+ lwz 8,8(11)
+ lwz 0,12(11)
+ stw 6,0(10)
+ stw 7,4(10)
+ stw 8,8(10)
+ stw 0,12(10)
+ addi 11,11,16
+ addi 10,10,16
+ bdnz 4b
+3:
+ clrrwi 0,31,2
+ mtcrf 0x01,31
+ beq cr6,0f
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+/* At this point we have a tail of 0-3 bytes and we know that the
+ destination is word aligned. */
+2: bf 30,1f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,0f
+ lbz 6,0(12)
+ stb 6,0(3)
+0:
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+/* Copy up to 31 bytes. This is divided into two cases: 0-8 bytes and
+ 9-31 bytes. Each case is handled without loops, using binary (1,2,4,8)
+ tests.
+
+ In the short (0-8 byte) case no attempt is made to force alignment
+ of either source or destination. The hardware will handle the
+ unaligned load/stores with small delays for crossing 32-, 128-,
+ and 4096-byte boundaries. Since these short moves are unlikely to be
+ unaligned or cross these boundaries, the overhead to force
+ alignment is not justified.
+
+ The longer (9-31 byte) move is more likely to cross 32- or 128-byte
+ boundaries. Since only loads are sensitive to the 32-/128-byte
+ boundaries, it is more important to align the source than the
+ destination. If the source is not already word aligned, we first
+ move 1-3 bytes as needed. Since we are only word aligned we don't
+ use doubleword load/stores to ensure that all loads are aligned.
+ While the destination and stores may still be unaligned, this
+ is only an issue for page (4096 byte boundary) crossing, which
+ should be rare for these short moves. The hardware handles this
+ case automatically with a small (~20 cycle) delay. */
+ .align 4
+
+ cfi_same_value (31)
+ cfi_same_value (30)
+L(word_unaligned_short):
+ mtcrf 0x01,5
+ cmplwi cr6,5,8
+ neg 8,4
+ clrrwi 9,4,2
+ andi. 0,8,3
+ beq cr6,L(wus_8) /* Handle moves of 8 bytes. */
+/* At least 9 bytes left. Get the source word aligned. */
+ cmplwi cr1,5,16
+ mr 12,4
+ ble cr6,L(wus_4) /* Handle moves of 0-8 bytes. */
+ mr 11,3
+ mr 10,5
+ cmplwi cr6,0,2
+ beq L(wus_tail) /* If the source is already word aligned skip this. */
+/* Copy 1-3 bytes to get source address word aligned. */
+ lwz 6,0(9)
+ subf 10,0,5
+ add 12,4,0
+ blt cr6,5f
+ srwi 7,6,16
+ bgt cr6,3f
+ sth 6,0(3)
+ b 7f
+ .align 4
+3:
+ stb 7,0(3)
+ sth 6,1(3)
+ b 7f
+ .align 4
+5:
+ stb 6,0(3)
+7:
+ cmplwi cr1,10,16
+ add 11,3,0
+ mtcrf 0x01,10
+ .align 4
+L(wus_tail):
+/* At least 6 bytes left and the source is word aligned. This allows
+ some speculative loads up front. */
+/* We need to special case the fall-through because the biggest delays
+ are due to address computation not being ready in time for the
+ AGEN. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ blt cr1,L(wus_tail8)
+ cmplwi cr0,10,24
+L(wus_tail16): /* Move 16 bytes. */
+ stw 6,0(11)
+ stw 7,4(11)
+ lwz 6,8(12)
+ lwz 7,12(12)
+ stw 6,8(11)
+ stw 7,12(11)
+/* Move 8 bytes more. */
+ bf 28,L(wus_tail16p8)
+ cmplwi cr1,10,28
+ lwz 6,16(12)
+ lwz 7,20(12)
+ stw 6,16(11)
+ stw 7,20(11)
+/* Move 4 bytes more. */
+ bf 29,L(wus_tail16p4)
+ lwz 6,24(12)
+ stw 6,24(11)
+ addi 12,12,28
+ addi 11,11,28
+ bgt cr1,L(wus_tail2)
+ /* exactly 28 bytes. Return original dst pointer and exit. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_tail16p8): /* less than 8 bytes left. */
+ beq cr1,L(wus_tailX) /* exactly 16 bytes, early exit. */
+ cmplwi cr1,10,20
+ bf 29,L(wus_tail16p2)
+/* Move 4 bytes more. */
+ lwz 6,16(12)
+ stw 6,16(11)
+ addi 12,12,20
+ addi 11,11,20
+ bgt cr1,L(wus_tail2)
+ /* exactly 20 bytes. Return original dst pointer and exit. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_tail16p4): /* less than 4 bytes left. */
+ addi 12,12,24
+ addi 11,11,24
+ bgt cr0,L(wus_tail2)
+ /* exactly 24 bytes. Return original dst pointer and exit. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_tail16p2): /* 16 bytes moved, less than 4 bytes left. */
+ addi 12,12,16
+ addi 11,11,16
+ b L(wus_tail2)
+
+ .align 4
+L(wus_tail8): /* Move 8 bytes. */
+/* r6, r7 already loaded speculatively. */
+ cmplwi cr1,10,8
+ cmplwi cr0,10,12
+ bf 28,L(wus_tail4)
+ stw 6,0(11)
+ stw 7,4(11)
+/* Move 4 bytes more. */
+ bf 29,L(wus_tail8p4)
+ lwz 6,8(12)
+ stw 6,8(11)
+ addi 12,12,12
+ addi 11,11,12
+ bgt cr0,L(wus_tail2)
+ /* exactly 12 bytes. Return original dst pointer and exit. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_tail8p4): /* less than 4 bytes left. */
+ addi 12,12,8
+ addi 11,11,8
+ bgt cr1,L(wus_tail2)
+ /* exactly 8 bytes. Return original dst pointer and exit. */
+ addi 1,1,32
+ blr
+
+ .align 4
+L(wus_tail4): /* Move 4 bytes. */
+/* r6 already loaded speculatively. If we are here we know there are
+ more than 4 bytes left, so there is no need to test. */
+ addi 12,12,4
+ stw 6,0(11)
+ addi 11,11,4
+L(wus_tail2): /* Move 2-3 bytes. */
+ bf 30,L(wus_tail1)
+ lhz 6,0(12)
+ sth 6,0(11)
+ bf 31,L(wus_tailX)
+ lbz 7,2(12)
+ stb 7,2(11)
+ addi 1,1,32
+ blr
+L(wus_tail1): /* Move 1 byte. */
+ bf 31,L(wus_tailX)
+ lbz 6,0(12)
+ stb 6,0(11)
+L(wus_tailX):
+ /* Return original dst pointer. */
+ addi 1,1,32
+ blr
+
+/* Special case to copy 0-8 bytes. */
+ .align 4
+L(wus_8):
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ /* Return original dst pointer. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_4):
+ bf 29,L(wus_2)
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,L(wus_5)
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,L(wus_0)
+ lbz 8,6(4)
+ stb 8,6(3)
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_5):
+ bf 31,L(wus_0)
+ lbz 6,4(4)
+ stb 6,4(3)
+ /* Return original dst pointer. */
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_2): /* Move 2-3 bytes. */
+ bf 30,L(wus_1)
+ lhz 6,0(4)
+ sth 6,0(3)
+ bf 31,L(wus_0)
+ lbz 7,2(4)
+ stb 7,2(3)
+ addi 1,1,32
+ blr
+ .align 4
+L(wus_1): /* Move 1 byte. */
+ bf 31,L(wus_0)
+ lbz 6,0(4)
+ stb 6,0(3)
+ .align 3
+L(wus_0):
+ /* Return original dst pointer. */
+ addi 1,1,32
+ blr
+
+ .align 4
+ cfi_offset(31,(24-32))
+ cfi_offset(30,(20-32))
+L(wdu):
+
+ /* Copy words where the destination is aligned but the source is
+ not. For power4, power5 and power6 machines there is a penalty for
+ unaligned loads (src) that cross 32-byte, cacheline, or page
+ boundaries. So we want to use simple (unaligned) loads where
+ possible but avoid them where we know the load would span a 32-byte
+ boundary.
+
+ At this point we know we have at least 29 (32-3) bytes to copy,
+ the src is unaligned, and we may cross at least one 32-byte
+ boundary. Also we have the following register values:
+ r3 == adjusted dst, word aligned
+ r4 == unadjusted src
+ r5 == unadjusted len
+ r9 == adjusted word length
+ r10 == src alignment (1-3)
+ r12 == adjusted src, not aligned
+ r31 == adjusted len
+
+ First we need to copy words up to but not crossing the next 32-byte
+ boundary. Then perform aligned loads just before and just after
+ the boundary and use shifts and ors to generate the next aligned
+ word for dst. If more than 32 bytes remain we copy (unaligned src)
+ the next 7 words and repeat the loop until fewer than 32 bytes
+ remain.
+
+ Then if more than 4 bytes remain we again use aligned loads,
+ shifts and ors to generate the next dst word. We then process the
+ remaining words using unaligned loads as needed. Finally we check
+ whether more than 0 bytes (1-3 bytes) remain and use
+ halfword and/or byte load/stores to complete the copy.
+*/
+ mr 4,12 /* restore unaligned adjusted src ptr */
+ clrlwi 0,12,27 /* Find dist from previous 32-byte boundary. */
+ slwi 10,10,3 /* calculate number of bits to shift 1st word left */
+ cmplwi cr5,0,16
+ subfic 8,0,32 /* Number of bytes to next 32-byte boundary. */
+
+ mtcrf 0x01,8
+ cmplwi cr1,10,16
+ subfic 9,10,32 /* number of bits to shift 2nd word right */
+/* This test is reversed because the timing to compare the bytes to
+ the next 32-byte boundary could not be met, so we compare the bytes
+ from the previous 32-byte boundary and invert the test. */
+ bge cr5,L(wdu_h32_8)
+ .align 4
+ lwz 6,0(4)
+ lwz 7,4(4)
+ addi 12,4,16 /* generate alternate pointers to avoid agen */
+ addi 11,3,16 /* timing issues downstream. */
+ stw 6,0(3)
+ stw 7,4(3)
+ subi 31,31,16
+ lwz 6,8(4)
+ lwz 7,12(4)
+ addi 4,4,16
+ stw 6,8(3)
+ stw 7,12(3)
+ addi 3,3,16
+ bf 28,L(wdu_h32_4)
+ lwz 6,0(12)
+ lwz 7,4(12)
+ subi 31,31,8
+ addi 4,4,8
+ stw 6,0(11)
+ stw 7,4(11)
+ addi 3,3,8
+ bf 29,L(wdu_h32_0)
+ lwz 6,8(12)
+ addi 4,4,4
+ subi 31,31,4
+ stw 6,8(11)
+ addi 3,3,4
+ b L(wdu_h32_0)
+ .align 4
+L(wdu_h32_8):
+ bf 28,L(wdu_h32_4)
+ lwz 6,0(4)
+ lwz 7,4(4)
+ subi 31,31,8
+ bf 29,L(wdu_h32_8x)
+ stw 6,0(3)
+ stw 7,4(3)
+ lwz 6,8(4)
+ addi 4,4,12
+ subi 31,31,4
+ stw 6,8(3)
+ addi 3,3,12
+ b L(wdu_h32_0)
+ .align 4
+L(wdu_h32_8x):
+ addi 4,4,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+ b L(wdu_h32_0)
+ .align 4
+L(wdu_h32_4):
+ bf 29,L(wdu_h32_0)
+ lwz 6,0(4)
+ subi 31,31,4
+ addi 4,4,4
+ stw 6,0(3)
+ addi 3,3,4
+ .align 4
+L(wdu_h32_0):
+/* set up for 32-byte boundary crossing word move and possibly 32-byte
+ move loop. */
+ clrrwi 12,4,2
+ cmplwi cr5,31,32
+ bge cr1,L(wdu2_32)
+#if 0
+ b L(wdu1_32)
+/*
+ cmplwi cr1,10,8
+ beq cr1,L(wdu1_32)
+ cmplwi cr1,10,16
+ beq cr1,L(wdu2_32)
+ cmplwi cr1,10,24
+ beq cr1,L(wdu3_32)
+*/
+L(wdu_32):
+ lwz 6,0(12)
+ cmplwi cr6,31,4
+ srwi 8,31,5 /* calculate the 32 byte loop count */
+ slw 0,6,10
+ clrlwi 31,31,27 /* The remaining bytes, < 32. */
+ blt cr5,L(wdu_32tail)
+ mtctr 8
+ cmplwi cr6,31,4
+ .align 4
+L(wdu_loop32):
+ /* copy 32 bytes at a time */
+ lwz 8,4(12)
+ addi 12,12,32
+ lwz 7,4(4)
+ srw 8,8,9
+ or 0,0,8
+ stw 0,0(3)
+ stw 7,4(3)
+ lwz 6,8(4)
+ lwz 7,12(4)
+ stw 6,8(3)
+ stw 7,12(3)
+ lwz 6,16(4)
+ lwz 7,20(4)
+ stw 6,16(3)
+ stw 7,20(3)
+ lwz 6,24(4)
+ lwz 7,28(4)
+ lwz 8,0(12)
+ addi 4,4,32
+ stw 6,24(3)
+ stw 7,28(3)
+ addi 3,3,32
+ slw 0,8,10
+ bdnz+ L(wdu_loop32)
+
+L(wdu_32tail):
+ mtcrf 0x01,31
+ cmplwi cr5,31,16
+ blt cr6,L(wdu_4tail)
+ /* calculate and store the final word */
+ lwz 8,4(12)
+ srw 8,8,9
+ or 6,0,8
+ b L(wdu_32tailx)
+#endif
+ .align 4
+L(wdu1_32):
+ lwz 6,-1(4)
+ cmplwi cr6,31,4
+ srwi 8,31,5 /* calculate the 32 byte loop count */
+ slwi 6,6,8
+ clrlwi 31,31,27 /* The remaining bytes, < 32. */
+ blt cr5,L(wdu1_32tail)
+ mtctr 8
+ cmplwi cr6,31,4
+
+ lwz 8,3(4)
+ lwz 7,4(4)
+/* Equivalent to: srwi 8,8,32-8; or 6,6,8 */
+ rlwimi 6,8,8,(32-8),31
+ b L(wdu1_loop32x)
+ .align 4
+L(wdu1_loop32):
+ /* copy 32 bytes at a time */
+ lwz 8,3(4)
+ lwz 7,4(4)
+ stw 10,-8(3)
+ stw 11,-4(3)
+/* Equivalent to srwi 8,8,32-8; or 6,6,8 */
+ rlwimi 6,8,8,(32-8),31
+L(wdu1_loop32x):
+ lwz 10,8(4)
+ lwz 11,12(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ lwz 6,16(4)
+ lwz 7,20(4)
+ stw 10,8(3)
+ stw 11,12(3)
+ lwz 10,24(4)
+ lwz 11,28(4)
+ lwz 8,32-1(4)
+ addi 4,4,32
+ stw 6,16(3)
+ stw 7,20(3)
+ addi 3,3,32
+ slwi 6,8,8
+ bdnz+ L(wdu1_loop32)
+ stw 10,-8(3)
+ stw 11,-4(3)
+
+L(wdu1_32tail):
+ mtcrf 0x01,31
+ cmplwi cr5,31,16
+ blt cr6,L(wdu_4tail)
+ /* calculate and store the final word */
+ lwz 8,3(4)
+/* Equivalent to: srwi 8,8,32-8; or 6,6,8 */
+ rlwimi 6,8,8,(32-8),31
+ b L(wdu_32tailx)
+
+L(wdu2_32):
+ bgt cr1,L(wdu3_32)
+ lwz 6,-2(4)
+ cmplwi cr6,31,4
+ srwi 8,31,5 /* calculate the 32 byte loop count */
+ slwi 6,6,16
+ clrlwi 31,31,27 /* The remaining bytes, < 32. */
+ blt cr5,L(wdu2_32tail)
+ mtctr 8
+ cmplwi cr6,31,4
+
+ lwz 8,2(4)
+ lwz 7,4(4)
+/* Equivalent to: srwi 8,8,32-16; or 6,6,8 */
+ rlwimi 6,8,16,(32-16),31
+ b L(wdu2_loop32x)
+ .align 4
+L(wdu2_loop32):
+ /* copy 32 bytes at a time */
+ lwz 8,2(4)
+ lwz 7,4(4)
+ stw 10,-8(3)
+ stw 11,-4(3)
+/* Equivalent to srwi 8,8,32-16; or 6,6,8 */
+ rlwimi 6,8,16,(32-16),31
+L(wdu2_loop32x):
+ lwz 10,8(4)
+ lwz 11,12(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ lwz 6,16(4)
+ lwz 7,20(4)
+ stw 10,8(3)
+ stw 11,12(3)
+ lwz 10,24(4)
+ lwz 11,28(4)
+/* lwz 8,0(12) */
+ lwz 8,32-2(4)
+ addi 4,4,32
+ stw 6,16(3)
+ stw 7,20(3)
+ addi 3,3,32
+ slwi 6,8,16
+ bdnz+ L(wdu2_loop32)
+ stw 10,-8(3)
+ stw 11,-4(3)
+
+L(wdu2_32tail):
+ mtcrf 0x01,31
+ cmplwi cr5,31,16
+ blt cr6,L(wdu_4tail)
+ /* calculate and store the final word */
+ lwz 8,2(4)
+/* Equivalent to: srwi 8,8,32-16; or 6,6,8 */
+ rlwimi 6,8,16,(32-16),31
+ b L(wdu_32tailx)
+
+L(wdu3_32):
+/* lwz 6,0(12) */
+ lwz 6,-3(4)
+ cmplwi cr6,31,4
+ srwi 8,31,5 /* calculate the 32 byte loop count */
+ slwi 6,6,24
+ clrlwi 31,31,27 /* The remaining bytes, < 32. */
+ blt cr5,L(wdu3_32tail)
+ mtctr 8
+ cmplwi cr6,31,4
+
+ lwz 8,1(4)
+ lwz 7,4(4)
+/* Equivalent to: srwi 8,8,32-24; or 6,6,8 */
+ rlwimi 6,8,24,(32-24),31
+ b L(wdu3_loop32x)
+ .align 4
+L(wdu3_loop32):
+ /* copy 32 bytes at a time */
+ lwz 8,1(4)
+ lwz 7,4(4)
+ stw 10,-8(3)
+ stw 11,-4(3)
+/* Equivalent to srwi 8,8,32-24; or 6,6,8 */
+ rlwimi 6,8,24,(32-24),31
+L(wdu3_loop32x):
+ lwz 10,8(4)
+ lwz 11,12(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ lwz 6,16(4)
+ lwz 7,20(4)
+ stw 10,8(3)
+ stw 11,12(3)
+ lwz 10,24(4)
+ lwz 11,28(4)
+ lwz 8,32-3(4)
+ addi 4,4,32
+ stw 6,16(3)
+ stw 7,20(3)
+ addi 3,3,32
+ slwi 6,8,24
+ bdnz+ L(wdu3_loop32)
+ stw 10,-8(3)
+ stw 11,-4(3)
+
+L(wdu3_32tail):
+ mtcrf 0x01,31
+ cmplwi cr5,31,16
+ blt cr6,L(wdu_4tail)
+ /* calculate and store the final word */
+ lwz 8,1(4)
+/* Equivalent to: srwi 8,8,32-24; or 6,6,8 */
+ rlwimi 6,8,24,(32-24),31
+ b L(wdu_32tailx)
+ .align 4
+L(wdu_32tailx):
+ blt cr5,L(wdu_t32_8)
+ lwz 7,4(4)
+ addi 12,4,16 /* generate alternate pointers to avoid agen */
+ addi 11,3,16 /* timing issues downstream. */
+ stw 6,0(3)
+ stw 7,4(3)
+ subi 31,31,16
+ lwz 6,8(4)
+ lwz 7,12(4)
+ addi 4,4,16
+ stw 6,8(3)
+ stw 7,12(3)
+ addi 3,3,16
+ bf 28,L(wdu_t32_4x)
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 4,4,8
+ subi 31,31,8
+ stw 6,0(11)
+ stw 7,4(11)
+ addi 3,3,8
+ bf 29,L(wdu_t32_0)
+ lwz 6,8(12)
+ addi 4,4,4
+ subi 31,31,4
+ stw 6,8(11)
+ addi 3,3,4
+ b L(wdu_t32_0)
+ .align 4
+L(wdu_t32_4x):
+ bf 29,L(wdu_t32_0)
+ lwz 6,0(4)
+ addi 4,4,4
+ subi 31,31,4
+ stw 6,0(3)
+ addi 3,3,4
+ b L(wdu_t32_0)
+ .align 4
+L(wdu_t32_8):
+ bf 28,L(wdu_t32_4)
+ lwz 7,4(4)
+ subi 31,31,8
+ bf 29,L(wdu_t32_8x)
+ stw 6,0(3)
+ stw 7,4(3)
+ lwz 6,8(4)
+ subi 31,31,4
+ addi 4,4,12
+ stw 6,8(3)
+ addi 3,3,12
+ b L(wdu_t32_0)
+ .align 4
+L(wdu_t32_8x):
+ addi 4,4,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+ b L(wdu_t32_0)
+ .align 4
+L(wdu_t32_4):
+ subi 31,31,4
+ stw 6,0(3)
+ addi 4,4,4
+ addi 3,3,4
+ .align 4
+L(wdu_t32_0):
+L(wdu_4tail):
+ cmplwi cr6,31,0
+ beq cr6,L(wdus_0) /* If the tail is 0 bytes we are done! */
+ bf 30,L(wdus_3)
+ lhz 7,0(4)
+ sth 7,0(3)
+ bf 31,L(wdus_0)
+ lbz 8,2(4)
+ stb 8,2(3)
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+ .align 4
+L(wdus_3):
+ bf 31,L(wus_0)
+ lbz 6,0(4)
+ stb 6,0(3)
+ .align 4
+L(wdus_0):
+ /* Return original dst pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+END (__memcpy_power6)
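The L(wdu) path above builds each aligned destination word from a pair of
aligned source words using shifts and ors. The C sketch below is only an
illustration written for this note (it assumes a big-endian layout such as
PowerPC32 and a source misaligned by 1-3 bytes); it is not code from this
patch, and unlike the assembly it may read a few bytes past the last word
for brevity.

/* Illustration only: copy 'nwords' 4-byte words to a word-aligned dst
   from a src that is NOT word aligned (misalignment 1-3), using only
   aligned word loads, in the spirit of the shift-and-or strategy above.  */
#include <stddef.h>
#include <stdint.h>

static void
copy_words_unaligned_src (uint32_t *dst, const unsigned char *src,
                          size_t nwords)
{
  uintptr_t misalign = (uintptr_t) src & 3;          /* 1-3, like r10 above.  */
  const uint32_t *asrc = (const uint32_t *) (src - misalign);
  unsigned int lshift = (unsigned int) misalign * 8; /* bits to shift 1st word left.  */
  unsigned int rshift = 32 - lshift;                 /* bits to shift 2nd word right.  */
  uint32_t w0 = *asrc++;

  for (size_t i = 0; i < nwords; i++)
    {
      uint32_t w1 = *asrc++;
      /* Big-endian: high bytes of the result come from w0, low bytes from w1.  */
      dst[i] = (w0 << lshift) | (w1 >> rshift);
      w0 = w1;
    }
}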
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-power7.S b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power7.S
new file mode 100644
index 0000000..00e9fd7
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-power7.S
@@ -0,0 +1,523 @@
+/* Optimized memcpy implementation for PowerPC32/POWER7.
+ Copyright (C) 2010-2013 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst'. */
+
+ .machine power7
+EALIGN (__memcpy_power7, 5, 0)
+ CALL_MCOUNT
+
+ stwu 1,-32(1)
+ cfi_adjust_cfa_offset(32)
+ stw 30,20(1)
+ cfi_offset(30,(20-32))
+ stw 31,24(1)
+ mr 30,3
+ cmplwi cr1,5,31
+ neg 0,3
+ cfi_offset(31,-8)
+ ble cr1, L(copy_LT_32) /* If move < 32 bytes use short move
+ code. */
+
+ andi. 11,3,7 /* Check alignment of DST. */
+ clrlwi 10,4,29 /* Check alignment of SRC. */
+ cmplw cr6,10,11 /* SRC and DST alignments match? */
+ mr 12,4
+ mr 31,5
+ bne cr6,L(copy_GE_32_unaligned)
+
+ srwi 9,5,3 /* Number of full doublewords remaining. */
+
+ beq L(copy_GE_32_aligned_cont)
+
+ clrlwi 0,0,29
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Get the SRC aligned to 8 bytes. */
+
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,4f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: bf 29,0f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+0:
+ clrlwi 10,12,29 /* Check alignment of SRC again. */
+ srwi 9,31,3 /* Number of full doublewords remaining. */
+
+L(copy_GE_32_aligned_cont):
+
+ clrlwi 11,31,29
+ mtcrf 0x01,9
+
+ srwi 8,31,5
+ cmplwi cr1,9,4
+ cmplwi cr6,11,0
+ mr 11,12
+
+ /* Copy 1~3 doublewords so the main loop starts
+ at a multiple of 32 bytes. */
+
+ bf 30,1f
+ lfd 6,0(12)
+ lfd 7,8(12)
+ addi 11,12,16
+ mtctr 8
+ stfd 6,0(3)
+ stfd 7,8(3)
+ addi 10,3,16
+ bf 31,4f
+ lfd 0,16(12)
+ stfd 0,16(3)
+ blt cr1,3f
+ addi 11,12,24
+ addi 10,3,24
+ b 4f
+
+ .align 4
+1: /* Copy 1 doubleword and set the counter. */
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ lfd 6,0(12)
+ addi 11,12,8
+ stfd 6,0(3)
+ addi 10,3,8
+
+L(aligned_copy):
+ /* Main aligned copy loop. Copies up to 128-bytes at a time. */
+ .align 4
+4:
+ /* Check for any 32-byte or 64-byte lumps that are outside of a
+ nice 128-byte range. R8 contains the number of 32-byte
+ lumps, so drop this into the CR, and use the SO/EQ bits to help
+ handle the 32- or 64-byte lumps. Then handle the rest with an
+ unrolled 128-bytes-at-a-time copy loop. */
+ mtocrf 1,8
+ li 6,16 # 16() index
+ li 7,32 # 32() index
+ li 8,48 # 48() index
+
+L(aligned_32byte):
+ /* if the SO bit (indicating a 32-byte lump) is not set, move along. */
+ bns cr7,L(aligned_64byte)
+ lxvd2x 6,0,11
+ lxvd2x 7,11,6
+ addi 11,11,32
+ stxvd2x 6,0,10
+ stxvd2x 7,10,6
+ addi 10,10,32
+
+L(aligned_64byte):
+ /* if the EQ bit (indicating a 64-byte lump) is not set, move along. */
+ bne cr7,L(aligned_128setup)
+ lxvd2x 6,0,11
+ lxvd2x 7,11,6
+ lxvd2x 8,11,7
+ lxvd2x 9,11,8
+ addi 11,11,64
+ stxvd2x 6,0,10
+ stxvd2x 7,10,6
+ stxvd2x 8,10,7
+ stxvd2x 9,10,8
+ addi 10,10,64
+
+L(aligned_128setup):
+ /* Set up for the 128-byte at a time copy loop. */
+ srwi 8,31,7
+ cmpwi 8,0 # Any 4x lumps left?
+ beq 3f # if not, move along.
+ lxvd2x 6,0,11
+ lxvd2x 7,11,6
+ mtctr 8 # otherwise, load the ctr and begin.
+ li 8,48 # 48() index
+ b L(aligned_128loop)
+
+L(aligned_128head):
+ /* for the 2nd + iteration of this loop. */
+ lxvd2x 6,0,11
+ lxvd2x 7,11,6
+L(aligned_128loop):
+ lxvd2x 8,11,7
+ lxvd2x 9,11,8
+ stxvd2x 6,0,10
+ addi 11,11,64
+ stxvd2x 7,10,6
+ stxvd2x 8,10,7
+ stxvd2x 9,10,8
+ lxvd2x 6,0,11
+ lxvd2x 7,11,6
+ addi 10,10,64
+ lxvd2x 8,11,7
+ lxvd2x 9,11,8
+ addi 11,11,64
+ stxvd2x 6,0,10
+ stxvd2x 7,10,6
+ stxvd2x 8,10,7
+ stxvd2x 9,10,8
+ addi 10,10,64
+ bdnz L(aligned_128head)
+
+3:
+ /* Check for tail bytes. */
+ clrrwi 0,31,3
+ mtcrf 0x01,31
+ beq cr6,0f
+
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+ /* At this point we have a tail of 0-7 bytes and we know that the
+ destination is doubleword-aligned. */
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 0~31 bytes. */
+ .align 4
+L(copy_LT_32):
+ cmplwi cr6,5,8
+ mr 12,4
+ mtcrf 0x01,5
+ ble cr6,L(copy_LE_8)
+
+ /* At least 9 bytes to go. */
+ neg 8,4
+ clrrwi 11,4,2
+ andi. 0,8,3
+ cmplwi cr1,5,16
+ mr 10,5
+ beq L(copy_LT_32_aligned)
+
+ /* Force 4-byte alignment for SRC. */
+ mtocrf 0x01,0
+ subf 10,0,5
+2: bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,L(end_4bytes_alignment)
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+
+ .align 4
+L(end_4bytes_alignment):
+ cmplwi cr1,10,16
+ mtcrf 0x01,10
+
+L(copy_LT_32_aligned):
+ /* At least 6 bytes to go, and SRC is word-aligned. */
+ blt cr1,8f
+
+ /* Copy 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 8,8(12)
+ stw 7,4(3)
+ lwz 6,12(12)
+ addi 12,12,16
+ stw 8,8(3)
+ stw 6,12(3)
+ addi 3,3,16
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2-3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+
+ /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handles copies of 0~8 bytes. */
+ .align 4
+L(copy_LE_8):
+ bne cr6,4f
+
+ /* Though we could've used lfd/stfd here, they are still
+ slow for unaligned cases. */
+
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+
+ /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+4: /* Copies 4~7 bytes. */
+ bf 29,2b
+
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+
+ /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+5: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,4(4)
+ stb 6,4(3)
+
+0: /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+ SRC is not. Use aligned quadword loads from SRC, shifted to realign
+ the data, allowing for aligned DST stores. */
+ .align 4
+L(copy_GE_32_unaligned):
+ andi. 11,3,15 /* Check alignment of DST. */
+ clrlwi 0,0,28 /* Number of bytes until the 1st
+ quadword of DST. */
+ srwi 9,5,4 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_unaligned_cont)
+
+ /* DST is not quadword aligned, get it aligned. */
+
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Vector instructions work best when proper alignment (16-bytes)
+ is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
+1: /* Copy 1 byte. */
+ bf 31,2f
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: /* Copy 2 bytes. */
+ bf 30,4f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: /* Copy 4 bytes. */
+ bf 29,8f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+8: /* Copy 8 bytes. */
+ bf 28,0f
+
+ lfd 6,0(12)
+ addi 12,12,8
+ stfd 6,0(3)
+ addi 3,3,8
+0:
+ clrlwi 10,12,28 /* Check alignment of SRC. */
+ srwi 9,31,4 /* Number of full quadwords remaining. */
+
+ /* The proper alignment is present; it is OK to copy the bytes now. */
+L(copy_GE_32_unaligned_cont):
+
+ /* Set up two indexes to speed up the indexed vector operations. */
+ clrlwi 11,31,28
+ li 6,16 /* Index for 16-bytes offsets. */
+ li 7,32 /* Index for 32-bytes offsets. */
+ cmplwi cr1,11,0
+ srwi 8,31,5 /* Setup the loop counter. */
+ mr 10,3
+ mr 11,12
+ mtcrf 0x01,9
+ cmplwi cr6,9,1
+ lvsl 5,0,12
+ lvx 3,0,12
+ bf 31,L(setup_unaligned_loop)
+
+ /* Copy another 16 bytes to align to 32 bytes due to the loop. */
+ lvx 4,12,6
+ vperm 6,3,4,5
+ addi 11,12,16
+ addi 10,3,16
+ stvx 6,0,3
+ vor 3,4,4
+
+L(setup_unaligned_loop):
+ mtctr 8
+ ble cr6,L(end_unaligned_loop)
+
+ /* Copy 32 bytes at a time using vector instructions. */
+ .align 4
+L(unaligned_loop):
+
+ /* Note: vr6/vr10 may contain data that was already copied,
+ but in order to get proper alignment, we may have to copy
+ some portions again. This is faster than having unaligned
+ vector instructions though. */
+
+ lvx 4,11,6 /* vr4 = r11+16. */
+ vperm 6,3,4,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr6. */
+ lvx 3,11,7 /* vr3 = r11+32. */
+ vperm 10,4,3,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr10. */
+ addi 11,11,32
+ stvx 6,0,10
+ stvx 10,10,6
+ addi 10,10,32
+
+ bdnz L(unaligned_loop)
+
+ .align 4
+L(end_unaligned_loop):
+
+ /* Check for tail bytes. */
+ clrrwi 0,31,4
+ mtcrf 0x01,31
+ beq cr1,0f
+
+ add 3,3,0
+ add 12,12,0
+
+ /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2~3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return original DST pointer. */
+ mr 3,30
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+END (__memcpy_power7)
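The unaligned path of __memcpy_power7 above uses lvsl/lvx/vperm to produce
aligned 16-byte stores from a misaligned source. The AltiVec-intrinsics
sketch below illustrates the same idea; it is not from this patch, and it
assumes a big-endian target, a 16-byte-aligned destination, and a length
that is a multiple of 16 (like lvx, it only ever reads whole aligned
16-byte blocks).

/* Illustration only: realign a misaligned source with vec_perm, as the
   lvsl/lvx/vperm sequence above does.  */
#include <altivec.h>
#include <stddef.h>

static void
copy_realigned (unsigned char *dst, const unsigned char *src, size_t nbytes)
{
  vector unsigned char permute = vec_lvsl (0, src); /* like lvsl 5,0,12.  */
  vector unsigned char prev = vec_ld (0, src);      /* like lvx 3,0,12.  */

  for (long off = 0; off < (long) nbytes; off += 16)
    {
      vector unsigned char next = vec_ld (off + 16, src);
      /* Merge the correctly-aligned portions of the two blocks.  */
      vector unsigned char merged = vec_perm (prev, next, permute);
      vec_st (merged, off, dst);
      prev = next;
    }
}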
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy-ppc32.c b/sysdeps/powerpc/powerpc32/multiarch/memcpy-ppc32.c
new file mode 100644
index 0000000..8157bed
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy-ppc32.c
@@ -0,0 +1,30 @@
+/* Default memcpy for PowerPC32.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+
+#define MEMCPY __memcpy_ppc32
+#if defined SHARED && !defined NOT_IN_libc
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name) \
+ __hidden_ver1(__memcpy_ppc32, __GI_memcpy, __memcpy_ppc32);
+#endif
+
+extern __typeof (memcpy) __memcpy_ppc32 attribute_hidden;
+
+#include <string/memcpy.c>
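The memcpy-ppc32.c file above defines MEMCPY before including the generic
string/memcpy.c, so the generic C body is emitted under the __memcpy_ppc32
symbol. A minimal sketch of the same pattern follows; the file and symbol
names are hypothetical and are not part of this patch.

/* generic_inc.c -- generic body whose exported name is macro-controlled.  */
#ifndef INC_NAME
# define INC_NAME increment
#endif

int
INC_NAME (int x)
{
  return x + 1;
}

/* increment-variant.c -- builds the same body under a variant symbol,
   just as memcpy-ppc32.c builds string/memcpy.c as __memcpy_ppc32.  */
#define INC_NAME __increment_variant
#include "generic_inc.c"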
diff --git a/sysdeps/powerpc/powerpc32/multiarch/memcpy.c b/sysdeps/powerpc/powerpc32/multiarch/memcpy.c
new file mode 100644
index 0000000..572cc69
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/multiarch/memcpy.c
@@ -0,0 +1,50 @@
+/* Multiple versions of memcpy.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc and only
+ for the DSO. In static binaries we need memcpy before the
+ initialization has happened. */
+#if defined SHARED && !defined NOT_IN_libc
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (memcpy) __memcpy_ppc32 attribute_hidden;
+extern __typeof (memcpy) __memcpy_power4 attribute_hidden;
+extern __typeof (memcpy) __memcpy_cell attribute_hidden;
+extern __typeof (memcpy) __memcpy_power6 attribute_hidden;
+extern __typeof (memcpy) __memcpy_a2 attribute_hidden;
+extern __typeof (memcpy) __memcpy_power7 attribute_hidden;
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+ ifunc symbol properly. */
+libc_ifunc (memcpy,
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __memcpy_power7 :
+ (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __memcpy_a2 :
+ (hwcap & PPC_FEATURE_ARCH_2_05)
+ ? __memcpy_power6 :
+ (hwcap & PPC_FEATURE_CELL_BE)
+ ? __memcpy_cell :
+ (hwcap & PPC_FEATURE_POWER4)
+ ? __memcpy_power4
+ : __memcpy_ppc32);
+#else
+# include <string/memcpy.c>
+#endif
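The libc_ifunc use above picks one implementation at load time from the
AT_HWCAP bits. As a rough, hypothetical C picture of the same decision
chain (illustration only: the real selection is done once by an IFUNC
resolver, and the PPC_FEATURE_* names are assumed to be visible from the
powerpc hwcap definitions rather than from the headers included here):

/* Illustration only: an ordinary run-time dispatcher equivalent in
   spirit to the libc_ifunc selection above.  */
#include <stddef.h>
#include <sys/auxv.h>   /* getauxval, AT_HWCAP.  */

extern void *__memcpy_power7 (void *, const void *, size_t);
extern void *__memcpy_a2 (void *, const void *, size_t);
extern void *__memcpy_power6 (void *, const void *, size_t);
extern void *__memcpy_cell (void *, const void *, size_t);
extern void *__memcpy_power4 (void *, const void *, size_t);
extern void *__memcpy_ppc32 (void *, const void *, size_t);

typedef void *(*memcpy_fn) (void *, const void *, size_t);

static memcpy_fn
pick_memcpy (void)
{
  /* PPC_FEATURE_* bits are assumed to be defined elsewhere, matching
     the names used in the ifunc selection above.  */
  unsigned long int hwcap = getauxval (AT_HWCAP);

  if (hwcap & PPC_FEATURE_HAS_VSX)
    return __memcpy_power7;
  if (hwcap & PPC_FEATURE_ARCH_2_06)
    return __memcpy_a2;
  if (hwcap & PPC_FEATURE_ARCH_2_05)
    return __memcpy_power6;
  if (hwcap & PPC_FEATURE_CELL_BE)
    return __memcpy_cell;
  if (hwcap & PPC_FEATURE_POWER4)
    return __memcpy_power4;
  return __memcpy_ppc32;
}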
diff --git a/sysdeps/powerpc/powerpc32/power4/memcpy.S b/sysdeps/powerpc/powerpc32/power4/memcpy.S
deleted file mode 100644
index d914663..0000000
--- a/sysdeps/powerpc/powerpc32/power4/memcpy.S
+++ /dev/null
@@ -1,423 +0,0 @@
-/* Optimized memcpy implementation for PowerPC32 on PowerPC64.
- Copyright (C) 2003-2013 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
- Returns 'dst'.
-
- Memcpy handles short copies (< 32-bytes) using a binary move blocks
- (no loops) of lwz/stw. The tail (remaining 1-3) bytes is handled
- with the appropriate combination of byte and halfword load/stores.
- There is minimal effort to optimize the alignment of short moves.
-
- Longer moves (>= 32-bytes) justify the effort to get at least the
- destination word (4-byte) aligned. Further optimization is
- possible when both source and destination are word aligned.
- Each case has an optimized unrolled loop. */
-
- .machine power4
-EALIGN (memcpy, 5, 0)
- CALL_MCOUNT
-
- stwu 1,-32(1)
- cfi_adjust_cfa_offset(32)
- stw 30,20(1)
- cfi_offset(30,(20-32))
- mr 30,3
- cmplwi cr1,5,31
- stw 31,24(1)
- cfi_offset(31,(24-32))
- neg 0,3
- andi. 11,3,3 /* check alignment of dst. */
- clrlwi 0,0,30 /* Number of bytes until the 1st word of dst. */
- clrlwi 10,4,30 /* check alignment of src. */
- cmplwi cr6,5,8
- ble- cr1,.L2 /* If move < 32 bytes use short move code. */
- cmplw cr6,10,11
- mr 12,4
- srwi 9,5,2 /* Number of full words remaining. */
- mtcrf 0x01,0
- mr 31,5
- beq .L0
-
- subf 31,0,5
- /* Move 0-3 bytes as needed to get the destination word aligned. */
-1: bf 31,2f
- lbz 6,0(12)
- addi 12,12,1
- stb 6,0(3)
- addi 3,3,1
-2: bf 30,0f
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-0:
- clrlwi 10,12,30 /* check alignment of src again. */
- srwi 9,31,2 /* Number of full words remaining. */
-
- /* Copy words from source to destination, assuming the destination is
- aligned on a word boundary.
-
- At this point we know there are at least 25 bytes left (32-7) to copy.
- The next step is to determine if the source is also word aligned.
- If not branch to the unaligned move code at .L6. which uses
- a load, shift, store strategy.
-
- Otherwise source and destination are word aligned, and we can use
- the optimized word copy loop. */
-.L0:
- clrlwi 11,31,30 /* calculate the number of tail bytes */
- mtcrf 0x01,9
- bne- cr6,.L6 /* If source is not word aligned. */
-
- /* Move words where destination and source are word aligned.
- Use an unrolled loop to copy 4 words (16-bytes) per iteration.
- If the copy is not an exact multiple of 16 bytes, 1-3
- words are copied as needed to set up the main loop. After
- the main loop exits there may be a tail of 1-3 bytes. These bytes are
- copied a halfword/byte at a time as needed to preserve alignment. */
-
- srwi 8,31,4 /* calculate the 16 byte loop count */
- cmplwi cr1,9,4
- cmplwi cr6,11,0
- mr 11,12
-
- bf 30,1f
- lwz 6,0(12)
- lwz 7,4(12)
- addi 11,12,8
- mtctr 8
- stw 6,0(3)
- stw 7,4(3)
- addi 10,3,8
- bf 31,4f
- lwz 0,8(12)
- stw 0,8(3)
- blt cr1,3f
- addi 11,12,12
- addi 10,3,12
- b 4f
- .align 4
-1:
- mr 10,3
- mtctr 8
- bf 31,4f
- lwz 6,0(12)
- addi 11,12,4
- stw 6,0(3)
- addi 10,3,4
-
- .align 4
-4:
- lwz 6,0(11)
- lwz 7,4(11)
- lwz 8,8(11)
- lwz 0,12(11)
- stw 6,0(10)
- stw 7,4(10)
- stw 8,8(10)
- stw 0,12(10)
- addi 11,11,16
- addi 10,10,16
- bdnz 4b
-3:
- clrrwi 0,31,2
- mtcrf 0x01,31
- beq cr6,0f
-.L9:
- add 3,3,0
- add 12,12,0
-
-/* At this point we have a tail of 0-3 bytes and we know that the
- destination is word aligned. */
-2: bf 30,1f
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-1: bf 31,0f
- lbz 6,0(12)
- stb 6,0(3)
-0:
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-
-/* Copy up to 31 bytes. This is divided into two cases 0-8 bytes and
- 9-31 bytes. Each case is handled without loops, using binary
- (1,2,4,8) tests.
-
- In the short (0-8 byte) case no attempt is made to force alignment
- of either source or destination. The hardware will handle the
- unaligned load/stores with small delays for crossing 32- 64-byte, and
- 4096-byte boundaries. Since these short moves are unlikely to be
- unaligned or cross these boundaries, the overhead to force
- alignment is not justified.
-
- The longer (9-31 byte) move is more likely to cross 32- or 64-byte
- boundaries. Since only loads are sensitive to the 32-/64-byte
- boundaries it is more important to align the source than the
- destination. If the source is not already word aligned, we first
- move 1-3 bytes as needed. While the destination and stores may
- still be unaligned, this is only an issue for page (4096 byte
- boundary) crossing, which should be rare for these short moves.
- The hardware handles this case automatically with a small delay. */
-
- .align 4
-.L2:
- mtcrf 0x01,5
- neg 8,4
- clrrwi 11,4,2
- andi. 0,8,3
- ble cr6,.LE8 /* Handle moves of 0-8 bytes. */
-/* At least 9 bytes left. Get the source word aligned. */
- cmplwi cr1,5,16
- mr 10,5
- mr 12,4
- cmplwi cr6,0,2
- beq .L3 /* If the source is already word aligned skip this. */
-/* Copy 1-3 bytes to get source address word aligned. */
- lwz 6,0(11)
- subf 10,0,5
- add 12,4,0
- blt cr6,5f
- srwi 7,6,16
- bgt cr6,3f
- sth 6,0(3)
- b 7f
- .align 4
-3:
- stb 7,0(3)
- sth 6,1(3)
- b 7f
- .align 4
-5:
- stb 6,0(3)
-7:
- cmplwi cr1,10,16
- add 3,3,0
- mtcrf 0x01,10
- .align 4
-.L3:
-/* At least 6 bytes left and the source is word aligned. */
- blt cr1,8f
-16: /* Move 16 bytes. */
- lwz 6,0(12)
- lwz 7,4(12)
- stw 6,0(3)
- lwz 6,8(12)
- stw 7,4(3)
- lwz 7,12(12)
- addi 12,12,16
- stw 6,8(3)
- stw 7,12(3)
- addi 3,3,16
-8: /* Move 8 bytes. */
- bf 28,4f
- lwz 6,0(12)
- lwz 7,4(12)
- addi 12,12,8
- stw 6,0(3)
- stw 7,4(3)
- addi 3,3,8
-4: /* Move 4 bytes. */
- bf 29,2f
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-2: /* Move 2-3 bytes. */
- bf 30,1f
- lhz 6,0(12)
- sth 6,0(3)
- bf 31,0f
- lbz 7,2(12)
- stb 7,2(3)
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-1: /* Move 1 byte. */
- bf 31,0f
- lbz 6,0(12)
- stb 6,0(3)
-0:
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
-/* Special case to copy 0-8 bytes. */
- .align 4
-.LE8:
- mr 12,4
- bne cr6,4f
- lwz 6,0(4)
- lwz 7,4(4)
- stw 6,0(3)
- stw 7,4(3)
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
- .align 4
-4: bf 29,2b
- lwz 6,0(4)
- stw 6,0(3)
-6:
- bf 30,5f
- lhz 7,4(4)
- sth 7,4(3)
- bf 31,0f
- lbz 8,6(4)
- stb 8,6(3)
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
- .align 4
-5:
- bf 31,0f
- lbz 6,4(4)
- stb 6,4(3)
- .align 4
-0:
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- .align 4
-.L6:
-
- /* Copy words where the destination is aligned but the source is
- not. Use aligned word loads from the source, shifted to realign
- the data, to allow aligned destination stores.
- Use an unrolled loop to copy 4 words (16-bytes) per iteration.
- A single word is retained for storing at loop exit to avoid walking
- off the end of a page within the loop.
- If the copy is not an exact multiple of 16 bytes, 1-3
- words are copied as needed to set up the main loop. After
- the main loop exits there may be a tail of 1-3 bytes. These bytes are
- copied a halfword/byte at a time as needed to preserve alignment. */
-
-
- cmplwi cr6,11,0 /* are there tail bytes left ? */
- subf 5,10,12 /* back up src pointer to prev word alignment */
- slwi 10,10,3 /* calculate number of bits to shift 1st word left */
- addi 11,9,-1 /* we move one word after the loop */
- srwi 8,11,2 /* calculate the 16 byte loop count */
- lwz 6,0(5) /* load 1st src word into R6 */
- mr 4,3
- lwz 7,4(5) /* load 2nd src word into R7 */
- mtcrf 0x01,11
- subfic 9,10,32 /* number of bits to shift 2nd word right */
- mtctr 8
- bf 30,1f
-
- /* there are at least two words to copy, so copy them */
- slw 0,6,10 /* shift 1st src word to left align it in R0 */
- srw 8,7,9 /* shift 2nd src word to right align it in R8 */
- or 0,0,8 /* or them to get word to store */
- lwz 6,8(5) /* load the 3rd src word */
- stw 0,0(4) /* store the 1st dst word */
- slw 0,7,10 /* now left align 2nd src word into R0 */
- srw 8,6,9 /* shift 3rd src word to right align it in R8 */
- or 0,0,8 /* or them to get word to store */
- lwz 7,12(5)
- stw 0,4(4) /* store the 2nd dst word */
- addi 4,4,8
- addi 5,5,16
- bf 31,4f
- /* there is a third word to copy, so copy it */
- slw 0,6,10 /* shift 3rd src word to left align it in R0 */
- srw 8,7,9 /* shift 4th src word to right align it in R8 */
- or 0,0,8 /* or them to get word to store */
- stw 0,0(4) /* store 3rd dst word */
- mr 6,7
- lwz 7,0(5)
- addi 5,5,4
- addi 4,4,4
- b 4f
- .align 4
-1:
- slw 0,6,10 /* shift 1st src word to left align it in R0 */
- srw 8,7,9 /* shift 2nd src word to right align it in R8 */
- addi 5,5,8
- or 0,0,8 /* or them to get word to store */
- bf 31,4f
- mr 6,7
- lwz 7,0(5)
- addi 5,5,4
- stw 0,0(4) /* store the 1st dst word */
- addi 4,4,4
-
- .align 4
-4:
- /* copy 16 bytes at a time */
- slw 0,6,10
- srw 8,7,9
- or 0,0,8
- lwz 6,0(5)
- stw 0,0(4)
- slw 0,7,10
- srw 8,6,9
- or 0,0,8
- lwz 7,4(5)
- stw 0,4(4)
- slw 0,6,10
- srw 8,7,9
- or 0,0,8
- lwz 6,8(5)
- stw 0,8(4)
- slw 0,7,10
- srw 8,6,9
- or 0,0,8
- lwz 7,12(5)
- stw 0,12(4)
- addi 5,5,16
- addi 4,4,16
- bdnz+ 4b
-8:
- /* calculate and store the final word */
- slw 0,6,10
- srw 8,7,9
- or 0,0,8
- stw 0,0(4)
-3:
- clrrwi 0,31,2
- mtcrf 0x01,31
- bne cr6,.L9 /* If the tail is 0 bytes we are done! */
-
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-END (memcpy)
-
-libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc32/power6/memcpy.S b/sysdeps/powerpc/powerpc32/power6/memcpy.S
deleted file mode 100644
index c3d55b7..0000000
--- a/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ /dev/null
@@ -1,840 +0,0 @@
-/* Optimized memcpy implementation for PowerPC32 on POWER6.
- Copyright (C) 2003-2013 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
- Returns 'dst'.
-
- Memcpy handles short copies (< 32-bytes) using a binary move blocks
- (no loops) of lwz/stw. The tail (remaining 1-3) bytes is handled
- with the appropriate combination of byte and halfword load/stores.
- There is minimal effort to optimize the alignment of short moves.
-
- Longer moves (>= 32-bytes) justify the effort to get at least the
- destination word (4-byte) aligned. Further optimization is
- possible when both source and destination are word aligned.
- Each case has an optimized unrolled loop. */
-
- .machine power6
-EALIGN (memcpy, 5, 0)
- CALL_MCOUNT
-
- stwu 1,-32(1)
- cfi_adjust_cfa_offset(32)
- cmplwi cr1,5,31 /* check for short move. */
- neg 0,3
- cmplwi cr1,5,31
- clrlwi 10,4,30 /* check alignment of src. */
- andi. 11,3,3 /* check alignment of dst. */
- clrlwi 0,0,30 /* Number of bytes until the 1st word of dst. */
- ble- cr1,L(word_unaligned_short) /* If move < 32 bytes. */
- cmplw cr6,10,11
- stw 31,24(1)
- cfi_offset(31,(24-32))
- stw 30,20(1)
- cfi_offset(30,(20-32))
- mr 30,3
- beq .L0
- mtcrf 0x01,0
- subf 31,0,5 /* Length after alignment. */
- add 12,4,0 /* Compute src addr after alignment. */
- /* Move 0-3 bytes as needed to get the destination word aligned. */
-1: bf 31,2f
- lbz 6,0(4)
- bf 30,3f
- lhz 7,1(4)
- stb 6,0(3)
- sth 7,1(3)
- addi 3,3,3
- b 0f
-3:
- stb 6,0(3)
- addi 3,3,1
- b 0f
-2: bf 30,0f
- lhz 6,0(4)
- sth 6,0(3)
- addi 3,3,2
-0:
- clrlwi 10,12,30 /* check alignment of src again. */
- srwi 9,31,2 /* Number of full words remaining. */
- bne- cr6,L(wdu) /* If source is not word aligned. .L6 */
- clrlwi 11,31,30 /* calculate the number of tail bytes */
- b L(word_aligned)
- /* Copy words from source to destination, assuming the destination is
- aligned on a word boundary.
-
- At this point we know there are at least 29 bytes left (32-3) to copy.
- The next step is to determine if the source is also word aligned.
- If not branch to the unaligned move code at .L6. which uses
- a load, shift, store strategy.
-
- Otherwise source and destination are word aligned, and we can use
- the optimized word copy loop. */
- .align 4
-.L0:
- mr 31,5
- mr 12,4
- bne- cr6,L(wdu) /* If source is not word aligned. .L6 */
- srwi 9,5,2 /* Number of full words remaining. */
- clrlwi 11,5,30 /* calculate the number of tail bytes */
-
- /* Move words where destination and source are word aligned.
- Use an unrolled loop to copy 4 words (16-bytes) per iteration.
- If the copy is not an exact multiple of 16 bytes, 1-3
- words are copied as needed to set up the main loop. After
- the main loop exits there may be a tail of 1-3 bytes. These bytes are
- copied a halfword/byte at a time as needed to preserve alignment. */
-L(word_aligned):
- mtcrf 0x01,9
- srwi 8,31,4 /* calculate the 16 byte loop count */
- cmplwi cr1,9,4
- cmplwi cr6,11,0
- mr 11,12
-
- bf 30,1f
- lwz 6,0(12)
- lwz 7,4(12)
- addi 11,12,8
- mtctr 8
- stw 6,0(3)
- stw 7,4(3)
- addi 10,3,8
- bf 31,4f
- lwz 0,8(12)
- stw 0,8(3)
- blt cr1,3f
- addi 11,12,12
- addi 10,3,12
- b 4f
- .align 4
-1:
- mr 10,3
- mtctr 8
- bf 31,4f
- lwz 6,0(12)
- addi 11,12,4
- stw 6,0(3)
- addi 10,3,4
-
- .align 4
-4:
- lwz 6,0(11)
- lwz 7,4(11)
- lwz 8,8(11)
- lwz 0,12(11)
- stw 6,0(10)
- stw 7,4(10)
- stw 8,8(10)
- stw 0,12(10)
- addi 11,11,16
- addi 10,10,16
- bdnz 4b
-3:
- clrrwi 0,31,2
- mtcrf 0x01,31
- beq cr6,0f
-.L9:
- add 3,3,0
- add 12,12,0
-
-/* At this point we have a tail of 0-3 bytes and we know that the
- destination is word aligned. */
-2: bf 30,1f
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-1: bf 31,0f
- lbz 6,0(12)
- stb 6,0(3)
-0:
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-
-/* Copy up to 31 bytes. This divided into two cases 0-8 bytes and 9-31
- bytes. Each case is handled without loops, using binary (1,2,4,8)
- tests.
-
- In the short (0-8 byte) case no attempt is made to force alignment
- of either source or destination. The hardware will handle the
- unaligned load/stores with small delays for crossing 32- 128-byte,
- and 4096-byte boundaries. Since these short moves are unlikely to be
- unaligned or cross these boundaries, the overhead to force
- alignment is not justified.
-
- The longer (9-31 byte) move is more likely to cross 32- or 128-byte
- boundaries. Since only loads are sensitive to the 32-/128-byte
- boundaries it is more important to align the source then the
- destination. If the source is not already word aligned, we first
- move 1-3 bytes as needed. Since we are only word aligned we don't
- use double word load/stores to insure that all loads are aligned.
- While the destination and stores may still be unaligned, this
- is only an issue for page (4096 byte boundary) crossing, which
- should be rare for these short moves. The hardware handles this
- case automatically with a small (~20 cycle) delay. */
- .align 4
-
- cfi_same_value (31)
- cfi_same_value (30)
-L(word_unaligned_short):
- mtcrf 0x01,5
- cmplwi cr6,5,8
- neg 8,4
- clrrwi 9,4,2
- andi. 0,8,3
- beq cr6,L(wus_8) /* Handle moves of 8 bytes. */
-/* At least 9 bytes left. Get the source word aligned. */
- cmplwi cr1,5,16
- mr 12,4
- ble cr6,L(wus_4) /* Handle moves of 0-8 bytes. */
- mr 11,3
- mr 10,5
- cmplwi cr6,0,2
- beq L(wus_tail) /* If the source is already word aligned skip this. */
-/* Copy 1-3 bytes to get source address word aligned. */
- lwz 6,0(9)
- subf 10,0,5
- add 12,4,0
- blt cr6,5f
- srwi 7,6,16
- bgt cr6,3f
- sth 6,0(3)
- b 7f
- .align 4
-3:
- stb 7,0(3)
- sth 6,1(3)
- b 7f
- .align 4
-5:
- stb 6,0(3)
-7:
- cmplwi cr1,10,16
- add 11,3,0
- mtcrf 0x01,10
- .align 4
-L(wus_tail):
-/* At least 6 bytes left and the source is word aligned. This allows
- some speculative loads up front. */
-/* We need to special case the fall-through because the biggest delays
- are due to address computation not being ready in time for the
- AGEN. */
- lwz 6,0(12)
- lwz 7,4(12)
- blt cr1,L(wus_tail8)
- cmplwi cr0,10,24
-L(wus_tail16): /* Move 16 bytes. */
- stw 6,0(11)
- stw 7,4(11)
- lwz 6,8(12)
- lwz 7,12(12)
- stw 6,8(11)
- stw 7,12(11)
-/* Move 8 bytes more. */
- bf 28,L(wus_tail16p8)
- cmplwi cr1,10,28
- lwz 6,16(12)
- lwz 7,20(12)
- stw 6,16(11)
- stw 7,20(11)
-/* Move 4 bytes more. */
- bf 29,L(wus_tail16p4)
- lwz 6,24(12)
- stw 6,24(11)
- addi 12,12,28
- addi 11,11,28
- bgt cr1,L(wus_tail2)
- /* exactly 28 bytes. Return original dst pointer and exit. */
- addi 1,1,32
- blr
- .align 4
-L(wus_tail16p8): /* less then 8 bytes left. */
- beq cr1,L(wus_tailX) /* exactly 16 bytes, early exit. */
- cmplwi cr1,10,20
- bf 29,L(wus_tail16p2)
-/* Move 4 bytes more. */
- lwz 6,16(12)
- stw 6,16(11)
- addi 12,12,20
- addi 11,11,20
- bgt cr1,L(wus_tail2)
- /* exactly 20 bytes. Return original dst pointer and exit. */
- addi 1,1,32
- blr
- .align 4
-L(wus_tail16p4): /* less then 4 bytes left. */
- addi 12,12,24
- addi 11,11,24
- bgt cr0,L(wus_tail2)
- /* exactly 24 bytes. Return original dst pointer and exit. */
- addi 1,1,32
- blr
- .align 4
-L(wus_tail16p2): /* 16 bytes moved, less then 4 bytes left. */
- addi 12,12,16
- addi 11,11,16
- b L(wus_tail2)
-
- .align 4
-L(wus_tail8): /* Move 8 bytes. */
-/* r6, r7 already loaded speculatively. */
- cmplwi cr1,10,8
- cmplwi cr0,10,12
- bf 28,L(wus_tail4)
- stw 6,0(11)
- stw 7,4(11)
-/* Move 4 bytes more. */
- bf 29,L(wus_tail8p4)
- lwz 6,8(12)
- stw 6,8(11)
- addi 12,12,12
- addi 11,11,12
- bgt cr0,L(wus_tail2)
- /* exactly 12 bytes. Return original dst pointer and exit. */
- addi 1,1,32
- blr
- .align 4
-L(wus_tail8p4): /* less then 4 bytes left. */
- addi 12,12,8
- addi 11,11,8
- bgt cr1,L(wus_tail2)
- /* exactly 8 bytes. Return original dst pointer and exit. */
- addi 1,1,32
- blr
-
- .align 4
-L(wus_tail4): /* Move 4 bytes. */
-/* r6 already loaded speculatively. If we are here we know there is
- more then 4 bytes left. So there is no need to test. */
- addi 12,12,4
- stw 6,0(11)
- addi 11,11,4
-L(wus_tail2): /* Move 2-3 bytes. */
- bf 30,L(wus_tail1)
- lhz 6,0(12)
- sth 6,0(11)
- bf 31,L(wus_tailX)
- lbz 7,2(12)
- stb 7,2(11)
- addi 1,1,32
- blr
-L(wus_tail1): /* Move 1 byte. */
- bf 31,L(wus_tailX)
- lbz 6,0(12)
- stb 6,0(11)
-L(wus_tailX):
- /* Return original dst pointer. */
- addi 1,1,32
- blr
-
-/* Special case to copy 0-8 bytes. */
- .align 4
-L(wus_8):
- lwz 6,0(4)
- lwz 7,4(4)
- stw 6,0(3)
- stw 7,4(3)
- /* Return original dst pointer. */
- addi 1,1,32
- blr
- .align 4
-L(wus_4):
- bf 29,L(wus_2)
- lwz 6,0(4)
- stw 6,0(3)
- bf 30,L(wus_5)
- lhz 7,4(4)
- sth 7,4(3)
- bf 31,L(wus_0)
- lbz 8,6(4)
- stb 8,6(3)
- addi 1,1,32
- blr
- .align 4
-L(wus_5):
- bf 31,L(wus_0)
- lbz 6,4(4)
- stb 6,4(3)
- /* Return original dst pointer. */
- addi 1,1,32
- blr
- .align 4
-L(wus_2): /* Move 2-3 bytes. */
- bf 30,L(wus_1)
- lhz 6,0(4)
- sth 6,0(3)
- bf 31,L(wus_0)
- lbz 7,2(4)
- stb 7,2(3)
- addi 1,1,32
- blr
- .align 4
-L(wus_1): /* Move 1 byte. */
- bf 31,L(wus_0)
- lbz 6,0(4)
- stb 6,0(3)
- .align 3
-L(wus_0):
- /* Return original dst pointer. */
- addi 1,1,32
- blr
-
- .align 4
- cfi_offset(31,(24-32))
- cfi_offset(30,(20-32))
-L(wdu):
-
- /* Copy words where the destination is aligned but the source is
- not. For power4, power5 and power6 machines there is penalty for
- unaligned loads (src) that cross 32-byte, cacheline, or page
- boundaries. So we want to use simple (unaligned) loads where
- possible but avoid them where we know the load would span a 32-byte
- boundary.
-
- At this point we know we have at least 29 (32-3) bytes to copy
- the src is unaligned. and we may cross at least one 32-byte
- boundary. Also we have the following register values:
- r3 == adjusted dst, word aligned
- r4 == unadjusted src
- r5 == unadjusted len
- r9 == adjusted Word length
- r10 == src alignment (1-3)
- r12 == adjusted src, not aligned
- r31 == adjusted len
-
- First we need to copy word up to but not crossing the next 32-byte
- boundary. Then perform aligned loads just before and just after
- the boundary and use shifts and or to generate the next aligned
- word for dst. If more then 32 bytes remain we copy (unaligned src)
- the next 7 words and repeat the loop until less then 32-bytes
- remain.
-
- Then if more then 4 bytes remain we again use aligned loads,
- shifts and or to generate the next dst word. We then process the
- remaining words using unaligned loads as needed. Finally we check
- if there more then 0 bytes (1-3) bytes remaining and use
- halfword and or byte load/stores to complete the copy.
-*/
- mr 4,12 /* restore unaligned adjusted src ptr */
- clrlwi 0,12,27 /* Find dist from previous 32-byte boundary. */
- slwi 10,10,3 /* calculate number of bits to shift 1st word left */
- cmplwi cr5,0,16
- subfic 8,0,32 /* Number of bytes to next 32-byte boundary. */
-
- mtcrf 0x01,8
- cmplwi cr1,10,16
- subfic 9,10,32 /* number of bits to shift 2nd word right */
-/* This test is reversed because the timing to compare the bytes to
- 32-byte boundary could not be meet. So we compare the bytes from
- previous 32-byte boundary and invert the test. */
- bge cr5,L(wdu_h32_8)
- .align 4
- lwz 6,0(4)
- lwz 7,4(4)
- addi 12,4,16 /* generate alternate pointers to avoid agen */
- addi 11,3,16 /* timing issues downstream. */
- stw 6,0(3)
- stw 7,4(3)
- subi 31,31,16
- lwz 6,8(4)
- lwz 7,12(4)
- addi 4,4,16
- stw 6,8(3)
- stw 7,12(3)
- addi 3,3,16
- bf 28,L(wdu_h32_4)
- lwz 6,0(12)
- lwz 7,4(12)
- subi 31,31,8
- addi 4,4,8
- stw 6,0(11)
- stw 7,4(11)
- addi 3,3,8
- bf 29,L(wdu_h32_0)
- lwz 6,8(12)
- addi 4,4,4
- subi 31,31,4
- stw 6,8(11)
- addi 3,3,4
- b L(wdu_h32_0)
- .align 4
-L(wdu_h32_8):
- bf 28,L(wdu_h32_4)
- lwz 6,0(4)
- lwz 7,4(4)
- subi 31,31,8
- bf 29,L(wdu_h32_8x)
- stw 6,0(3)
- stw 7,4(3)
- lwz 6,8(4)
- addi 4,4,12
- subi 31,31,4
- stw 6,8(3)
- addi 3,3,12
- b L(wdu_h32_0)
- .align 4
-L(wdu_h32_8x):
- addi 4,4,8
- stw 6,0(3)
- stw 7,4(3)
- addi 3,3,8
- b L(wdu_h32_0)
- .align 4
-L(wdu_h32_4):
- bf 29,L(wdu_h32_0)
- lwz 6,0(4)
- subi 31,31,4
- addi 4,4,4
- stw 6,0(3)
- addi 3,3,4
- .align 4
-L(wdu_h32_0):
-/* set up for 32-byte boundary crossing word move and possibly 32-byte
- move loop. */
- clrrwi 12,4,2
- cmplwi cr5,31,32
- bge cr1,L(wdu2_32)
-#if 0
- b L(wdu1_32)
-/*
- cmplwi cr1,10,8
- beq cr1,L(wdu1_32)
- cmplwi cr1,10,16
- beq cr1,L(wdu2_32)
- cmplwi cr1,10,24
- beq cr1,L(wdu3_32)
-*/
-L(wdu_32):
- lwz 6,0(12)
- cmplwi cr6,31,4
- srwi 8,31,5 /* calculate the 32 byte loop count */
- slw 0,6,10
- clrlwi 31,31,27 /* The remaining bytes, < 32. */
- blt cr5,L(wdu_32tail)
- mtctr 8
- cmplwi cr6,31,4
- .align 4
-L(wdu_loop32):
- /* copy 32 bytes at a time */
- lwz 8,4(12)
- addi 12,12,32
- lwz 7,4(4)
- srw 8,8,9
- or 0,0,8
- stw 0,0(3)
- stw 7,4(3)
- lwz 6,8(4)
- lwz 7,12(4)
- stw 6,8(3)
- stw 7,12(3)
- lwz 6,16(4)
- lwz 7,20(4)
- stw 6,16(3)
- stw 7,20(3)
- lwz 6,24(4)
- lwz 7,28(4)
- lwz 8,0(12)
- addi 4,4,32
- stw 6,24(3)
- stw 7,28(3)
- addi 3,3,32
- slw 0,8,10
- bdnz+ L(wdu_loop32)
-
-L(wdu_32tail):
- mtcrf 0x01,31
- cmplwi cr5,31,16
- blt cr6,L(wdu_4tail)
- /* calculate and store the final word */
- lwz 8,4(12)
- srw 8,8,9
- or 6,0,8
- b L(wdu_32tailx)
-#endif
- .align 4
-L(wdu1_32):
- lwz 6,-1(4)
- cmplwi cr6,31,4
- srwi 8,31,5 /* calculate the 32 byte loop count */
- slwi 6,6,8
- clrlwi 31,31,27 /* The remaining bytes, < 32. */
- blt cr5,L(wdu1_32tail)
- mtctr 8
- cmplwi cr6,31,4
-
- lwz 8,3(4)
- lwz 7,4(4)
-/* Equivalent to: srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,8,(32-8),31
- b L(wdu1_loop32x)
- .align 4
-L(wdu1_loop32):
- /* copy 32 bytes at a time */
- lwz 8,3(4)
- lwz 7,4(4)
- stw 10,-8(3)
- stw 11,-4(3)
-/* Equivalent to srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,8,(32-8),31
-L(wdu1_loop32x):
- lwz 10,8(4)
- lwz 11,12(4)
- stw 6,0(3)
- stw 7,4(3)
- lwz 6,16(4)
- lwz 7,20(4)
- stw 10,8(3)
- stw 11,12(3)
- lwz 10,24(4)
- lwz 11,28(4)
- lwz 8,32-1(4)
- addi 4,4,32
- stw 6,16(3)
- stw 7,20(3)
- addi 3,3,32
- slwi 6,8,8
- bdnz+ L(wdu1_loop32)
- stw 10,-8(3)
- stw 11,-4(3)
-
-L(wdu1_32tail):
- mtcrf 0x01,31
- cmplwi cr5,31,16
- blt cr6,L(wdu_4tail)
- /* calculate and store the final word */
- lwz 8,3(4)
-/* Equivalent to: srwi 8,8,32-9; or 6,6,8 */
- rlwimi 6,8,8,(32-8),31
- b L(wdu_32tailx)
-
-L(wdu2_32):
- bgt cr1,L(wdu3_32)
- lwz 6,-2(4)
- cmplwi cr6,31,4
- srwi 8,31,5 /* calculate the 32 byte loop count */
- slwi 6,6,16
- clrlwi 31,31,27 /* The remaining bytes, < 32. */
- blt cr5,L(wdu2_32tail)
- mtctr 8
- cmplwi cr6,31,4
-
- lwz 8,2(4)
- lwz 7,4(4)
-/* Equivalent to: srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,16,(32-16),31
- b L(wdu2_loop32x)
- .align 4
-L(wdu2_loop32):
- /* copy 32 bytes at a time */
- lwz 8,2(4)
- lwz 7,4(4)
- stw 10,-8(3)
- stw 11,-4(3)
-/* Equivalent to srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,16,(32-16),31
-L(wdu2_loop32x):
- lwz 10,8(4)
- lwz 11,12(4)
- stw 6,0(3)
- stw 7,4(3)
- lwz 6,16(4)
- lwz 7,20(4)
- stw 10,8(3)
- stw 11,12(3)
- lwz 10,24(4)
- lwz 11,28(4)
-/* lwz 8,0(12) */
- lwz 8,32-2(4)
- addi 4,4,32
- stw 6,16(3)
- stw 7,20(3)
- addi 3,3,32
- slwi 6,8,16
- bdnz+ L(wdu2_loop32)
- stw 10,-8(3)
- stw 11,-4(3)
-
-L(wdu2_32tail):
- mtcrf 0x01,31
- cmplwi cr5,31,16
- blt cr6,L(wdu_4tail)
- /* calculate and store the final word */
- lwz 8,2(4)
-/* Equivalent to: srwi 8,8,32-9; or 6,6,8 */
- rlwimi 6,8,16,(32-16),31
- b L(wdu_32tailx)
-
-L(wdu3_32):
-/* lwz 6,0(12) */
- lwz 6,-3(4)
- cmplwi cr6,31,4
- srwi 8,31,5 /* calculate the 32 byte loop count */
- slwi 6,6,24
- clrlwi 31,31,27 /* The remaining bytes, < 32. */
- blt cr5,L(wdu3_32tail)
- mtctr 8
- cmplwi cr6,31,4
-
- lwz 8,1(4)
- lwz 7,4(4)
-/* Equivalent to: srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,24,(32-24),31
- b L(wdu3_loop32x)
- .align 4
-L(wdu3_loop32):
- /* copy 32 bytes at a time */
- lwz 8,1(4)
- lwz 7,4(4)
- stw 10,-8(3)
- stw 11,-4(3)
-/* Equivalent to srwi 8,8,32-8; or 6,6,8 */
- rlwimi 6,8,24,(32-24),31
-L(wdu3_loop32x):
- lwz 10,8(4)
- lwz 11,12(4)
- stw 6,0(3)
- stw 7,4(3)
- lwz 6,16(4)
- lwz 7,20(4)
- stw 10,8(3)
- stw 11,12(3)
- lwz 10,24(4)
- lwz 11,28(4)
- lwz 8,32-3(4)
- addi 4,4,32
- stw 6,16(3)
- stw 7,20(3)
- addi 3,3,32
- slwi 6,8,24
- bdnz+ L(wdu3_loop32)
- stw 10,-8(3)
- stw 11,-4(3)
-
-L(wdu3_32tail):
- mtcrf 0x01,31
- cmplwi cr5,31,16
- blt cr6,L(wdu_4tail)
- /* calculate and store the final word */
- lwz 8,1(4)
-/* Equivalent to: srwi 8,8,32-9; or 6,6,8 */
- rlwimi 6,8,24,(32-24),31
- b L(wdu_32tailx)
- .align 4
-L(wdu_32tailx):
- blt cr5,L(wdu_t32_8)
- lwz 7,4(4)
- addi 12,4,16 /* generate alternate pointers to avoid agen */
- addi 11,3,16 /* timing issues downstream. */
- stw 6,0(3)
- stw 7,4(3)
- subi 31,31,16
- lwz 6,8(4)
- lwz 7,12(4)
- addi 4,4,16
- stw 6,8(3)
- stw 7,12(3)
- addi 3,3,16
- bf 28,L(wdu_t32_4x)
- lwz 6,0(12)
- lwz 7,4(12)
- addi 4,4,8
- subi 31,31,8
- stw 6,0(11)
- stw 7,4(11)
- addi 3,3,8
- bf 29,L(wdu_t32_0)
- lwz 6,8(12)
- addi 4,4,4
- subi 31,31,4
- stw 6,8(11)
- addi 3,3,4
- b L(wdu_t32_0)
- .align 4
-L(wdu_t32_4x):
- bf 29,L(wdu_t32_0)
- lwz 6,0(4)
- addi 4,4,4
- subi 31,31,4
- stw 6,0(3)
- addi 3,3,4
- b L(wdu_t32_0)
- .align 4
-L(wdu_t32_8):
- bf 28,L(wdu_t32_4)
- lwz 7,4(4)
- subi 31,31,8
- bf 29,L(wdu_t32_8x)
- stw 6,0(3)
- stw 7,4(3)
- lwz 6,8(4)
- subi 31,31,4
- addi 4,4,12
- stw 6,8(3)
- addi 3,3,12
- b L(wdu_t32_0)
- .align 4
-L(wdu_t32_8x):
- addi 4,4,8
- stw 6,0(3)
- stw 7,4(3)
- addi 3,3,8
- b L(wdu_t32_0)
- .align 4
-L(wdu_t32_4):
- subi 31,31,4
- stw 6,0(3)
- addi 4,4,4
- addi 3,3,4
- .align 4
-L(wdu_t32_0):
-L(wdu_4tail):
- cmplwi cr6,31,0
- beq cr6,L(wdus_0) /* If the tail is 0 bytes we are done! */
- bf 30,L(wdus_3)
- lhz 7,0(4)
- sth 7,0(3)
- bf 31,L(wdus_0)
- lbz 8,2(4)
- stb 8,2(3)
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
- .align 4
-L(wdus_3):
- bf 31,L(wus_0)
- lbz 6,0(4)
- stb 6,0(3)
- .align 4
-L(wdus_0):
- /* Return original dst pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-END (memcpy)
-
-libc_hidden_builtin_def (memcpy)
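
[Editor's note, not part of the patch] The wdu2/wdu3 paths deleted above all play the same trick: the source is read only with aligned lwz loads, and each pair of adjacent aligned words is spliced together with a shift (slwi) plus a rotate-and-insert (rlwimi) so the destination can be written with plain aligned stw stores. The C sketch below is only an illustration of that splice, not glibc code: every name in it is invented, big-endian byte order and a nonzero source misalignment (1-3 bytes) are assumed, and, like the assembly, it reads the aligned word containing the last source bytes, which can extend slightly past the copied region.

  #include <stddef.h>
  #include <stdint.h>

  /* Copy NWORDS 32-bit words to an aligned DST from a SRC that is
     1, 2 or 3 bytes past a word boundary, using only aligned loads.  */
  static void
  copy_words_src_misaligned (uint32_t *dst, const unsigned char *src,
                             size_t nwords)
  {
    size_t off = (uintptr_t) src & 3;           /* assumed 1, 2 or 3 here  */
    const uint32_t *asrc = (const uint32_t *) (src - off);
    unsigned int lsh = 8 * off;                 /* the slwi shift amount   */
    unsigned int rsh = 32 - lsh;                /* the rlwimi insert width */
    uint32_t prev = *asrc++;                    /* first aligned word      */

    for (size_t i = 0; i < nwords; i++)
      {
        uint32_t next = *asrc++;                /* next aligned word       */
        dst[i] = (prev << lsh) | (next >> rsh); /* slwi + rlwimi merge     */
        prev = next;
      }
  }

For the wdu2 case (off == 2) the merge is (prev << 16) | (next >> 16), which is what the slwi 6,6,16 / rlwimi 6,8,16,(32-16),31 pair in the loop above computes for each destination word; the wdu3 case is the same with a 24/8 split.
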
diff --git a/sysdeps/powerpc/powerpc32/power7/memcpy.S b/sysdeps/powerpc/powerpc32/power7/memcpy.S
deleted file mode 100644
index 7f00778..0000000
--- a/sysdeps/powerpc/powerpc32/power7/memcpy.S
+++ /dev/null
@@ -1,524 +0,0 @@
-/* Optimized memcpy implementation for PowerPC32/POWER7.
- Copyright (C) 2010-2013 Free Software Foundation, Inc.
- Contributed by Luis Machado <luisgpm@br.ibm.com>.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
- Returns 'dst'. */
-
- .machine power7
-EALIGN (memcpy, 5, 0)
- CALL_MCOUNT
-
- stwu 1,-32(1)
- cfi_adjust_cfa_offset(32)
- stw 30,20(1)
- cfi_offset(30,(20-32))
- stw 31,24(1)
- mr 30,3
- cmplwi cr1,5,31
- neg 0,3
- cfi_offset(31,-8)
- ble cr1, L(copy_LT_32) /* If move < 32 bytes use short move
- code. */
-
- andi. 11,3,7 /* Check alignment of DST. */
- clrlwi 10,4,29 /* Check alignment of SRC. */
- cmplw cr6,10,11 /* SRC and DST alignments match? */
- mr 12,4
- mr 31,5
- bne cr6,L(copy_GE_32_unaligned)
-
- srwi 9,5,3 /* Number of full quadwords remaining. */
-
- beq L(copy_GE_32_aligned_cont)
-
- clrlwi 0,0,29
- mtcrf 0x01,0
- subf 31,0,5
-
- /* Get the SRC aligned to 8 bytes. */
-
-1: bf 31,2f
- lbz 6,0(12)
- addi 12,12,1
- stb 6,0(3)
- addi 3,3,1
-2: bf 30,4f
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-4: bf 29,0f
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-0:
- clrlwi 10,12,29 /* Check alignment of SRC again. */
- srwi 9,31,3 /* Number of full doublewords remaining. */
-
-L(copy_GE_32_aligned_cont):
-
- clrlwi 11,31,29
- mtcrf 0x01,9
-
- srwi 8,31,5
- cmplwi cr1,9,4
- cmplwi cr6,11,0
- mr 11,12
-
- /* Copy 1~3 doublewords so the main loop starts
- at a multiple of 32 bytes. */
-
- bf 30,1f
- lfd 6,0(12)
- lfd 7,8(12)
- addi 11,12,16
- mtctr 8
- stfd 6,0(3)
- stfd 7,8(3)
- addi 10,3,16
- bf 31,4f
- lfd 0,16(12)
- stfd 0,16(3)
- blt cr1,3f
- addi 11,12,24
- addi 10,3,24
- b 4f
-
- .align 4
-1: /* Copy 1 doubleword and set the counter. */
- mr 10,3
- mtctr 8
- bf 31,4f
- lfd 6,0(12)
- addi 11,12,8
- stfd 6,0(3)
- addi 10,3,8
-
-L(aligned_copy):
- /* Main aligned copy loop. Copies up to 128-bytes at a time. */
- .align 4
-4:
- /* check for any 32-byte or 64-byte lumps that are outside of a
- nice 128-byte range. R8 contains the number of 32-byte
- lumps, so drop this into the CR, and use the SO/EQ bits to help
- handle the 32- or 64- byte lumps. Then handle the rest with an
- unrolled 128-bytes-at-a-time copy loop. */
- mtocrf 1,8
- li 6,16 # 16() index
- li 7,32 # 32() index
- li 8,48 # 48() index
-
-L(aligned_32byte):
- /* if the SO bit (indicating a 32-byte lump) is not set, move along. */
- bns cr7,L(aligned_64byte)
- lxvd2x 6,0,11
- lxvd2x 7,11,6
- addi 11,11,32
- stxvd2x 6,0,10
- stxvd2x 7,10,6
- addi 10,10,32
-
-L(aligned_64byte):
- /* if the EQ bit (indicating a 64-byte lump) is not set, move along. */
- bne cr7,L(aligned_128setup)
- lxvd2x 6,0,11
- lxvd2x 7,11,6
- lxvd2x 8,11,7
- lxvd2x 9,11,8
- addi 11,11,64
- stxvd2x 6,0,10
- stxvd2x 7,10,6
- stxvd2x 8,10,7
- stxvd2x 9,10,8
- addi 10,10,64
-
-L(aligned_128setup):
- /* Set up for the 128-byte at a time copy loop. */
- srwi 8,31,7
- cmpwi 8,0 # Any 4x lumps left?
- beq 3f # if not, move along.
- lxvd2x 6,0,11
- lxvd2x 7,11,6
- mtctr 8 # otherwise, load the ctr and begin.
- li 8,48 # 48() index
- b L(aligned_128loop)
-
-L(aligned_128head):
- /* for the 2nd + iteration of this loop. */
- lxvd2x 6,0,11
- lxvd2x 7,11,6
-L(aligned_128loop):
- lxvd2x 8,11,7
- lxvd2x 9,11,8
- stxvd2x 6,0,10
- addi 11,11,64
- stxvd2x 7,10,6
- stxvd2x 8,10,7
- stxvd2x 9,10,8
- lxvd2x 6,0,11
- lxvd2x 7,11,6
- addi 10,10,64
- lxvd2x 8,11,7
- lxvd2x 9,11,8
- addi 11,11,64
- stxvd2x 6,0,10
- stxvd2x 7,10,6
- stxvd2x 8,10,7
- stxvd2x 9,10,8
- addi 10,10,64
- bdnz L(aligned_128head)
-
-3:
- /* Check for tail bytes. */
- clrrwi 0,31,3
- mtcrf 0x01,31
- beq cr6,0f
-
-.L9:
- add 3,3,0
- add 12,12,0
-
- /* At this point we have a tail of 0-7 bytes and we know that the
- destination is doubleword-aligned. */
-4: /* Copy 4 bytes. */
- bf 29,2f
-
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-2: /* Copy 2 bytes. */
- bf 30,1f
-
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-1: /* Copy 1 byte. */
- bf 31,0f
-
- lbz 6,0(12)
- stb 6,0(3)
-0: /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-
- /* Handle copies of 0~31 bytes. */
- .align 4
-L(copy_LT_32):
- cmplwi cr6,5,8
- mr 12,4
- mtcrf 0x01,5
- ble cr6,L(copy_LE_8)
-
- /* At least 9 bytes to go. */
- neg 8,4
- clrrwi 11,4,2
- andi. 0,8,3
- cmplwi cr1,5,16
- mr 10,5
- beq L(copy_LT_32_aligned)
-
- /* Force 4-bytes alignment for SRC. */
- mtocrf 0x01,0
- subf 10,0,5
-2: bf 30,1f
-
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-1: bf 31,L(end_4bytes_alignment)
-
- lbz 6,0(12)
- addi 12,12,1
- stb 6,0(3)
- addi 3,3,1
-
- .align 4
-L(end_4bytes_alignment):
- cmplwi cr1,10,16
- mtcrf 0x01,10
-
-L(copy_LT_32_aligned):
- /* At least 6 bytes to go, and SRC is word-aligned. */
- blt cr1,8f
-
- /* Copy 16 bytes. */
- lwz 6,0(12)
- lwz 7,4(12)
- stw 6,0(3)
- lwz 8,8(12)
- stw 7,4(3)
- lwz 6,12(12)
- addi 12,12,16
- stw 8,8(3)
- stw 6,12(3)
- addi 3,3,16
-8: /* Copy 8 bytes. */
- bf 28,4f
-
- lwz 6,0(12)
- lwz 7,4(12)
- addi 12,12,8
- stw 6,0(3)
- stw 7,4(3)
- addi 3,3,8
-4: /* Copy 4 bytes. */
- bf 29,2f
-
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-2: /* Copy 2-3 bytes. */
- bf 30,1f
-
- lhz 6,0(12)
- sth 6,0(3)
- bf 31,0f
- lbz 7,2(12)
- stb 7,2(3)
-
- /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- .align 4
-1: /* Copy 1 byte. */
- bf 31,0f
-
- lbz 6,0(12)
- stb 6,0(3)
-0: /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- /* Handles copies of 0~8 bytes. */
- .align 4
-L(copy_LE_8):
- bne cr6,4f
-
- /* Though we could've used lfd/stfd here, they are still
- slow for unaligned cases. */
-
- lwz 6,0(4)
- lwz 7,4(4)
- stw 6,0(3)
- stw 7,4(3)
-
- /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- .align 4
-4: /* Copies 4~7 bytes. */
- bf 29,2b
-
- lwz 6,0(4)
- stw 6,0(3)
- bf 30,5f
- lhz 7,4(4)
- sth 7,4(3)
- bf 31,0f
- lbz 8,6(4)
- stb 8,6(3)
-
- /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- .align 4
-5: /* Copy 1 byte. */
- bf 31,0f
-
- lbz 6,4(4)
- stb 6,4(3)
-
-0: /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- addi 1,1,32
- blr
-
- /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
- SRC is not. Use aligned quadword loads from SRC, shifted to realign
- the data, allowing for aligned DST stores. */
- .align 4
-L(copy_GE_32_unaligned):
- andi. 11,3,15 /* Check alignment of DST. */
- clrlwi 0,0,28 /* Number of bytes until the 1st
- quadword of DST. */
- srwi 9,5,4 /* Number of full quadwords remaining. */
-
- beq L(copy_GE_32_unaligned_cont)
-
- /* SRC is not quadword aligned, get it aligned. */
-
- mtcrf 0x01,0
- subf 31,0,5
-
- /* Vector instructions work best when proper alignment (16-bytes)
- is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
-1: /* Copy 1 byte. */
- bf 31,2f
-
- lbz 6,0(12)
- addi 12,12,1
- stb 6,0(3)
- addi 3,3,1
-2: /* Copy 2 bytes. */
- bf 30,4f
-
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-4: /* Copy 4 bytes. */
- bf 29,8f
-
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-8: /* Copy 8 bytes. */
- bf 28,0f
-
- lfd 6,0(12)
- addi 12,12,8
- stfd 6,0(3)
- addi 3,3,8
-0:
- clrlwi 10,12,28 /* Check alignment of SRC. */
- srwi 9,31,4 /* Number of full quadwords remaining. */
-
- /* The proper alignment is present, it is OK to copy the bytes now. */
-L(copy_GE_32_unaligned_cont):
-
- /* Setup two indexes to speed up the indexed vector operations. */
- clrlwi 11,31,28
- li 6,16 /* Index for 16-bytes offsets. */
- li 7,32 /* Index for 32-bytes offsets. */
- cmplwi cr1,11,0
- srwi 8,31,5 /* Setup the loop counter. */
- mr 10,3
- mr 11,12
- mtcrf 0x01,9
- cmplwi cr6,9,1
- lvsl 5,0,12
- lvx 3,0,12
- bf 31,L(setup_unaligned_loop)
-
- /* Copy another 16 bytes to align to 32-bytes due to the loop . */
- lvx 4,12,6
- vperm 6,3,4,5
- addi 11,12,16
- addi 10,3,16
- stvx 6,0,3
- vor 3,4,4
-
-L(setup_unaligned_loop):
- mtctr 8
- ble cr6,L(end_unaligned_loop)
-
- /* Copy 32 bytes at a time using vector instructions. */
- .align 4
-L(unaligned_loop):
-
- /* Note: vr6/vr10 may contain data that was already copied,
- but in order to get proper alignment, we may have to copy
- some portions again. This is faster than having unaligned
- vector instructions though. */
-
- lvx 4,11,6 /* vr4 = r11+16. */
- vperm 6,3,4,5 /* Merge the correctly-aligned portions
- of vr3/vr4 into vr6. */
- lvx 3,11,7 /* vr3 = r11+32. */
- vperm 10,4,3,5 /* Merge the correctly-aligned portions
- of vr3/vr4 into vr10. */
- addi 11,11,32
- stvx 6,0,10
- stvx 10,10,6
- addi 10,10,32
-
- bdnz L(unaligned_loop)
-
- .align 4
-L(end_unaligned_loop):
-
- /* Check for tail bytes. */
- clrrwi 0,31,4
- mtcrf 0x01,31
- beq cr1,0f
-
- add 3,3,0
- add 12,12,0
-
- /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
-8: /* Copy 8 bytes. */
- bf 28,4f
-
- lwz 6,0(12)
- lwz 7,4(12)
- addi 12,12,8
- stw 6,0(3)
- stw 7,4(3)
- addi 3,3,8
-4: /* Copy 4 bytes. */
- bf 29,2f
-
- lwz 6,0(12)
- addi 12,12,4
- stw 6,0(3)
- addi 3,3,4
-2: /* Copy 2~3 bytes. */
- bf 30,1f
-
- lhz 6,0(12)
- addi 12,12,2
- sth 6,0(3)
- addi 3,3,2
-1: /* Copy 1 byte. */
- bf 31,0f
-
- lbz 6,0(12)
- stb 6,0(3)
-0: /* Return original DST pointer. */
- mr 3,30
- lwz 30,20(1)
- lwz 31,24(1)
- addi 1,1,32
- blr
-
-END (memcpy)
-libc_hidden_builtin_def (memcpy)
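
[Editor's note, not part of the patch] With these per-CPU memcpy.S files deleted, the implementations live on under the multiarch directory and one of them is chosen at run time through an IFUNC. The fragment below is only a rough, self-contained sketch of that style of dispatch using the GCC ifunc attribute and AT_HWCAP; the symbol names are made up, the PPC_FEATURE_HAS_VSX gate for the POWER7-class variant is an assumption made for the example, and the real glibc selector goes through its internal hwcap/ifunc plumbing and covers all of the variants this patch moves, not just two.

  #include <stddef.h>
  #include <sys/auxv.h>

  #ifndef PPC_FEATURE_HAS_VSX
  # define PPC_FEATURE_HAS_VSX 0x00000080  /* Linux AT_HWCAP bit for VSX  */
  #endif

  /* Hypothetical renamed implementations, stand-ins for the per-CPU
     assembly routines that replace the deleted files.  */
  extern void *__memcpy_power7_example (void *, const void *, size_t);
  extern void *__memcpy_generic_example (void *, const void *, size_t);

  typedef void *(*memcpy_fn) (void *, const void *, size_t);

  /* The resolver runs once, when the symbol is relocated, and returns
     the routine the dynamic linker should bind the IFUNC symbol to.  */
  static memcpy_fn
  memcpy_resolver (void)
  {
    unsigned long hwcap = getauxval (AT_HWCAP);

    if (hwcap & PPC_FEATURE_HAS_VSX)      /* POWER7-class core (assumed gate)  */
      return __memcpy_power7_example;
    return __memcpy_generic_example;
  }

  /* Callers see an ordinary function; the choice above is made once
     per process, so steady-state calls pay only the PLT indirection.  */
  void *memcpy_ifunc_example (void *, const void *, size_t)
    __attribute__ ((ifunc ("memcpy_resolver")));
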