This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



[PATCH] powerpc: power7-optimized 64-bit and 32-bit memcpy


Hi,

The following patch adds 32-bit and 64-bit POWER7-specific memcpy
functions.

The code selects a copy strategy based on the copy length (and, for long
copies, on the relative alignment of SRC and DST), and is derived from
the POWER4 memcpy code.  A rough C sketch of the dispatch follows the
list below.

* very short copies (0~8 bytes): simple loopless copy code.

* short copies (9~31 bytes): pre-alignment plus simple loopless copy
code.

* aligned long moves (32+ bytes with equally-aligned SRC/DST):
Pre-alignment to doubleword and copy loop of 32 bytes per iteration.

* unaligned long moves (32+ bytes with unaligned SRC/DST): Pre-alignment
of DST to quadword and copy loop of 32 bytes per iteration with a
Load/Shift/Store strategy and VMX instructions.

For 64-bit, the main improvement comes from the use of VMX instructions
for unaligned long moves: they need fewer instructions to do the same
work than a plain ld/shift/std sequence.
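For reference, the load/shift/store idea looks roughly like this when
written with Altivec/VMX intrinsics (big-endian lvsl semantics, as in
the patch).  This is only a sketch of the technique, not the patch's
code: the assembly uses lvsl/lvx/vperm/stvx directly, the function name
is made up, and its preconditions (DST already quadword-aligned, len a
multiple of 32) are what the assembly arranges before entering its loop.

#include <altivec.h>
#include <stddef.h>

/* Sketch: copy 'len' bytes (len % 32 == 0) from an unaligned SRC to a
   16-byte-aligned DST using aligned vector loads plus a permute.  Like
   the assembly, the last aligned load may read a few bytes past SRC+len,
   but never beyond the aligned quadword holding the last source byte.  */
static void
copy_unaligned_vmx (unsigned char *dst, const unsigned char *src, size_t len)
{
  /* Permute mask derived from SRC's offset within its quadword (lvsl).  */
  vector unsigned char shift = vec_lvsl (0, src);
  /* First aligned quadword covering SRC (lvx).  */
  vector unsigned char prev = vec_ld (0, src);

  for (size_t i = 0; i < len; i += 32)
    {
      vector unsigned char a = vec_ld (i + 16, src);
      vector unsigned char b = vec_ld (i + 32, src);
      /* vperm merges two aligned loads into the 16 bytes that actually
         start at SRC + i (and SRC + i + 16).  */
      vec_st (vec_perm (prev, a, shift), i, dst);
      vec_st (vec_perm (a, b, shift), i + 16, dst);
      prev = b;                /* carry the last quadword forward */
    }
}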

For the 32-bit code, the improvement comes from a wider copy loop for
aligned long moves (32-byte blocks instead of the previous 16-byte
blocks), from the use of VMX instructions for unaligned long moves, and
from the use of floating-point load/store instructions where 8 bytes
have to be copied and the alignment is known to be suitable.
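The floating-point trick matters on 32-bit because an lfd/stfd pair
moves 8 bytes in two instructions where integer code needs two lwz/stw
pairs.  A rough, hypothetical C equivalent is below: the bits only
travel through a double as a container, and a compiler targeting 32-bit
PowerPC may or may not lower it to an FP load/store pair; the patch
simply emits lfd/stfd itself.

#include <string.h>

/* Copy one 8-byte chunk whose alignment is known to be suitable, routing
   it through an FPR-sized temporary instead of two 32-bit GPR moves.  */
static void
copy8_via_fpr (void *dst, const void *src)
{
  double tmp;              /* 8 bytes wide even on the 32-bit ABI */
  memcpy (&tmp, src, 8);   /* one 8-byte load (lfd in the hand-written code) */
  memcpy (dst, &tmp, 8);   /* one 8-byte store (stfd) */
}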

Also, the branch hints were all removed, since the hardware's branch
prediction does better on its own.  This mostly improved performance for
the short copies.

Performance has improved across the board, especially in the cases
mentioned above.

The best improvements came from short copies and unaligned long moves
(50+% reduction in runtime).  The aligned long moves didn't change much
for 64-bit (as expected) and improved slightly for 32-bit (10~20%
reduction in runtime, thanks to the bigger loop).

Regtested on powerpc/powerpc64. No problems found.

Any comments? OK for trunk?

Regards,
Luis

---

2010-03-10  Luis Machado  <luisgpm@br.ibm.com>

	* sysdeps/powerpc/powerpc64/power7/memcpy.S: New POWER7-optimized
	64-bit memcpy.
	* sysdeps/powerpc/powerpc32/power7/memcpy.S: New POWER7-optimized
	32-bit memcpy.

diff --git a/sysdeps/powerpc/powerpc32/power7/memcpy.S b/sysdeps/powerpc/powerpc32/power7/memcpy.S
new file mode 100644
index 0000000..d96ef20
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/power7/memcpy.S
@@ -0,0 +1,469 @@
+/* Optimized memcpy implementation for PowerPC32/POWER7.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.  */
+
+	.machine  power7
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT
+
+	stwu    1,-32(1)
+	cfi_adjust_cfa_offset(32)
+	stw	30,20(1)
+	cfi_offset(30,(20-32))
+	stw	31,24(1)
+	mr      30,3
+	cmplwi  cr1,5,31
+	neg	0,3
+	cfi_offset(31,-8)
+	ble	cr1, L(copy_LT_32)  /* If move < 32 bytes use short move
+				    code.  */
+
+	andi.   11,3,7	      /* Check alignment of DST.  */
+	clrlwi  10,4,29	      /* Check alignment of SRC.  */
+	cmplw   cr6,10,11     /* SRC and DST alignments match?  */
+	mr	12,4
+	mr	31,5
+	bne	cr6,L(copy_GE_32_unaligned)
+
+	srwi    9,5,3	      /* Number of full doublewords remaining.  */
+
+	beq	L(copy_GE_32_aligned_cont)
+
+	clrlwi  0,0,29
+	mtcrf   0x01,0
+	subf    31,0,5
+
+	/* Get the SRC aligned to 8 bytes.  */
+
+1:  	bf	31,2f
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+2:  	bf      30,4f
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+4:  	bf      29,0f
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+0:
+    	clrlwi  10,12,29      /* Check alignment of SRC again.  */
+    	srwi    9,31,3	      /* Number of full doublewords remaining.  */
+
+L(copy_GE_32_aligned_cont):
+
+    	clrlwi  11,31,29
+    	mtcrf   0x01,9
+
+    	srwi    8,31,5
+    	cmplwi  cr1,9,4
+    	cmplwi  cr6,11,0
+    	mr	11,12
+
+    	/* Copy 1~3 doublewords so the main loop starts
+    	at a multiple of 32 bytes.  */
+
+    	bf	30,1f
+    	lfd     6,0(12)
+    	lfd     7,8(12)
+    	addi    11,12,16
+    	mtctr   8
+    	stfd    6,0(3)
+    	stfd    7,8(3)
+    	addi    10,3,16
+    	bf      31,4f
+    	lfd     0,16(12)
+    	stfd    0,16(3)
+    	blt     cr1,3f
+    	addi    11,12,24
+    	addi    10,3,24
+    	b       4f
+
+    	.align  4
+1:  	/* Copy 1 doubleword and set the counter.  */
+    	mr	10,3
+    	mtctr   8
+    	bf      31,4f
+    	lfd     6,0(12)
+    	addi    11,12,8
+    	stfd    6,0(3)
+    	addi    10,3,8
+
+    	.align  4
+4:  	/* Main aligned copy loop. Copies 32-bytes at a time.  */
+    	lfd	6,0(11)
+    	lfd     7,8(11)
+    	lfd     8,16(11)
+    	lfd     0,24(11)
+    	addi    11,11,32
+
+    	stfd    6,0(10)
+    	stfd    7,8(10)
+    	stfd    8,16(10)
+    	stfd    0,24(10)
+    	addi    10,10,32
+    	bdnz    4b
+3:
+
+    	/* Check for tail bytes.  */
+
+    	clrrwi  0,31,3
+    	mtcrf   0x01,31
+    	beq	cr6,0f
+
+.L9:
+    	add	3,3,0
+    	add	12,12,0
+
+    	/*  At this point we have a tail of 0-7 bytes and we know that the
+    	destination is doubleword-aligned.  */
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2 bytes.  */
+    	bf	30,1f
+
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	mr	3,30
+    	lwz	30,20(1)
+    	lwz     31,24(1)
+    	addi    1,1,32
+    	blr
+
+    	/* Handle copies of 0~31 bytes.  */
+    	.align  4
+L(copy_LT_32):
+    	cmplwi  cr6,5,8
+    	mr	12,4
+    	mtcrf   0x01,5
+    	ble	cr6,L(copy_LE_8)
+
+    	/* At least 9 bytes to go.  */
+    	neg	8,4
+    	clrrwi  11,4,2
+    	andi.   0,8,3
+    	cmplwi  cr1,5,16
+    	mr	10,5
+    	beq	L(copy_LT_32_aligned)
+
+    	/* Force 4-bytes alignment for SRC.  */
+    	mtocrf  0x01,0
+    	subf    10,0,5
+2:  	bf	30,1f
+
+    	lhz	6,0(12)
+    	addi    12,12,2
+    	sth	6,0(3)
+    	addi    3,3,2
+1:  	bf	31,L(end_4bytes_alignment)
+
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+
+    	.align  4
+L(end_4bytes_alignment):
+    	cmplwi  cr1,10,16
+    	mtcrf   0x01,10
+
+L(copy_LT_32_aligned):
+    	/* At least 6 bytes to go, and SRC is word-aligned.  */
+    	blt	cr1,8f
+
+    	/* Copy 16 bytes.  */
+    	lwz	6,0(12)
+    	lwz     7,4(12)
+    	stw     6,0(3)
+    	lwz     8,8(12)
+    	stw     7,4(3)
+    	lwz     6,12(12)
+    	addi    12,12,16
+    	stw     8,8(3)
+    	stw     6,12(3)
+    	addi    3,3,16
+8:  	/* Copy 8 bytes.  */
+    	bf	28,4f
+
+    	lwz     6,0(12)
+    	lwz     7,4(12)
+    	addi    12,12,8
+    	stw     6,0(3)
+    	stw     7,4(3)
+    	addi    3,3,8
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2-3 bytes.  */
+    	bf	30,1f
+
+    	lhz     6,0(12)
+    	sth     6,0(3)
+    	bf      31,0f
+    	lbz     7,2(12)
+    	stb     7,2(3)
+
+    	/* Return original DST pointer.  */
+    	mr      3,30
+    	lwz     30,20(1)
+    	addi    1,1,32
+    	blr
+
+    	.align  4
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	mr	3,30
+    	lwz	30,20(1)
+    	addi    1,1,32
+    	blr
+
+    	/* Handles copies of 0~8 bytes.  */
+    	.align  4
+L(copy_LE_8):
+    	bne	cr6,4f
+
+    	/* Though we could've used lfd/stfd here, they are still
+    	slow for unaligned cases.  */
+
+    	lwz	6,0(4)
+    	lwz     7,4(4)
+    	stw     6,0(3)
+    	stw     7,4(3)
+
+    	/* Return original DST pointer.  */
+    	mr      3,30
+    	lwz     30,20(1)
+    	addi    1,1,32
+    	blr
+
+    	.align  4
+4:  	/* Copies 4~7 bytes.  */
+    	bf	29,2b
+
+    	lwz	6,0(4)
+    	stw     6,0(3)
+    	bf      30,5f
+    	lhz     7,4(4)
+    	sth     7,4(3)
+    	bf      31,0f
+    	lbz     8,6(4)
+    	stb     8,6(3)
+
+    	/* Return original DST pointer.  */
+    	mr      3,30
+    	lwz     30,20(1)
+    	addi    1,1,32
+    	blr
+
+    	.align  4
+5:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,4(4)
+    	stb	6,4(3)
+
+0:  	/* Return original DST pointer.  */
+    	mr	3,30
+    	lwz     30,20(1)
+    	addi    1,1,32
+    	blr
+
+    	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+    	SRC is not. Use aligned quadword loads from SRC, shifted to realign
+    	the data, allowing for aligned DST stores.  */
+    	.align  4
+L(copy_GE_32_unaligned):
+    	andi.   11,3,15	      /* Check alignment of DST.  */
+    	clrlwi  0,0,28	      /* Number of bytes until the 1st
+    	    		      quadword of DST.  */
+    	srwi    9,5,4	      /* Number of full quadwords remaining.  */
+
+    	beq    L(copy_GE_32_unaligned_cont)
+
+    	/* SRC is not quadword aligned, get it aligned.  */
+
+    	mtcrf   0x01,0
+    	subf    31,0,5
+
+    	/* Vector instructions work best when proper alignment (16-bytes)
+    	is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
+1:  	/* Copy 1 byte.  */
+    	bf	31,2f
+
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+2:  	/* Copy 2 bytes.  */
+    	bf	    30,4f
+
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+4:  	/* Copy 4 bytes.  */
+    	bf	29,8f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+8:  	/* Copy 8 bytes.  */
+    	bf	28,0f
+
+    	lfd	6,0(12)
+    	addi    12,12,8
+    	stfd    6,0(3)
+    	addi    3,3,8
+0:
+    	clrlwi  10,12,28      /* Check alignment of SRC.  */
+    	srdi    9,31,4	      /* Number of full quadwords remaining.  */
+
+    	/* The proper alignment is present, it is OK to copy the bytes now.  */
+L(copy_GE_32_unaligned_cont):
+
+    	/* Setup two indexes to speed up the indexed vector operations.  */
+    	clrlwi  11,31,28
+    	li      6,16	      /* Index for 16-bytes offsets.  */
+    	li	7,32	      /* Index for 32-bytes offsets.  */
+    	cmplwi  cr1,11,0
+    	srdi    8,31,5	      /* Setup the loop counter.  */
+    	mr      10,3
+    	mr      11,12
+    	mtcrf   0x01,9
+    	cmplwi  cr6,9,1
+    	lvsl    5,0,12
+    	lvx     3,0,12
+    	bf      31,L(setup_unaligned_loop)
+
+    	/* Copy another 16 bytes so the main loop below works on 32-byte blocks.  */
+    	lvx     4,12,6
+    	vperm   6,3,4,5
+    	addi    11,12,16
+    	addi    10,3,16
+    	stvx    6,0,3
+    	vor	3,4,4
+
+L(setup_unaligned_loop):
+    	mtctr   8
+    	ble     cr6,L(end_unaligned_loop)
+
+    	/* Copy 32 bytes at a time using vector instructions.  */
+    	.align  4
+L(unaligned_loop):
+
+    	/* Note: vr6/vr10 may contain data that was already copied,
+    	but in order to get proper alignment, we may have to copy
+    	some portions again. This is faster than having unaligned
+    	vector instructions though.  */
+
+    	lvx	4,11,6	      /* vr4 = r11+16.  */
+    	vperm   6,3,4,5	      /* Merge the correctly-aligned portions
+    	    		      of vr3/vr4 into vr6.  */
+    	lvx	3,11,7	      /* vr3 = r11+32.  */
+    	vperm   10,4,3,5      /* Merge the correctly-aligned portions
+    	    		      of vr3/vr4 into vr10.  */
+    	addi    11,11,32
+    	stvx    6,0,10
+    	stvx    10,10,6
+    	addi    10,10,32
+
+    	bdnz    L(unaligned_loop)
+
+    	.align  4
+L(end_unaligned_loop):
+
+    	/* Check for tail bytes.  */
+    	clrrwi  0,31,4
+    	mtcrf   0x01,31
+    	beq	cr1,0f
+
+    	add	3,3,0
+    	add	12,12,0
+
+    	/*  We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
+8:  	/* Copy 8 bytes.  */
+    	bf	28,4f
+
+    	lwz	6,0(12)
+    	lwz	7,4(12)
+    	addi    12,12,8
+    	stw	6,0(3)
+    	stw	7,4(3)
+    	addi    3,3,8
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz	6,0(12)
+    	addi    12,12,4
+    	stw	6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2~3 bytes.  */
+    	bf	30,1f
+
+    	lhz	6,0(12)
+    	addi    12,12,2
+    	sth	6,0(3)
+    	addi    3,3,2
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	mr	3,30
+    	lwz     30,20(1)
+    	lwz	31,24(1)
+    	addi    1,1,32
+    	blr
+
+END (BP_SYM (memcpy))
+libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/power7/memcpy.S b/sysdeps/powerpc/powerpc64/power7/memcpy.S
new file mode 100644
index 0000000..a2a1f82
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power7/memcpy.S
@@ -0,0 +1,449 @@
+/* Optimized memcpy implementation for PowerPC64/POWER7.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.  */
+
+	.machine power7
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT 3
+
+	cmpldi  cr1,5,31
+	neg	0,3
+	std	3,-16(1)
+	std	31,-8(1)
+	cfi_offset(31,-8)
+	ble	cr1, L(copy_LT_32)  /* If move < 32 bytes use short move
+				    code.  */
+
+	andi.   11,3,7	      /* Check alignment of DST.  */
+
+
+	clrldi  10,4,61       /* Check alignment of SRC.  */
+	cmpld   cr6,10,11     /* SRC and DST alignments match?  */
+	mr	12,4
+	mr	31,5
+	bne	cr6,L(copy_GE_32_unaligned)
+
+	srdi    9,5,3	      /* Number of full doublewords remaining.  */
+
+	beq    L(copy_GE_32_aligned_cont)
+
+	clrldi  0,0,61
+	mtcrf   0x01,0
+	subf    31,0,5
+
+	/* Get the SRC aligned to 8 bytes.  */
+
+1:	bf	31,2f
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+2:  	bf      30,4f
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+4:  	bf      29,0f
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+0:
+    	clrldi  10,12,61      /* Check alignment of SRC again.  */
+    	srdi    9,31,3	      /* Number of full doublewords remaining.  */
+
+L(copy_GE_32_aligned_cont):
+
+	clrldi  11,31,61
+	mtcrf   0x01,9
+
+	srdi    8,31,5
+	cmpldi  cr1,9,4
+	cmpldi  cr6,11,0
+	mr	11,12
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+	ld      6,0(12)
+	ld      7,8(12)
+	addi    11,12,16
+	mtctr   8
+	std     6,0(3)
+	std     7,8(3)
+	addi    10,3,16
+	bf      31,4f
+	ld      0,16(12)
+	std     0,16(3)
+	blt     cr1,3f
+	addi    11,12,24
+	addi    10,3,24
+	b       4f
+
+	.align  4
+1:	/* Copy 1 doubleword and set the counter.  */
+    	mr	10,3
+    	mtctr   8
+    	bf      31,4f
+    	ld      6,0(12)
+    	addi    11,12,8
+    	std     6,0(3)
+    	addi    10,3,8
+
+    	/* Main aligned copy loop. Copies 32-bytes at a time.  */
+    	.align  4
+4:
+    	ld	6,0(11)
+    	ld      7,8(11)
+    	ld      8,16(11)
+    	ld      0,24(11)
+    	addi    11,11,32
+
+    	std     6,0(10)
+    	std     7,8(10)
+    	std     8,16(10)
+    	std     0,24(10)
+    	addi    10,10,32
+    	bdnz    4b
+3:
+
+    	/* Check for tail bytes.  */
+    	rldicr  0,31,0,60
+    	mtcrf   0x01,31
+    	beq	cr6,0f
+
+.L9:
+    	add	3,3,0
+    	add	12,12,0
+
+    	/*  At this point we have a tail of 0-7 bytes and we know that the
+    	destination is doubleword-aligned.  */
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2 bytes.  */
+    	bf	30,1f
+
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	ld	31,-8(1)
+    	ld	3,-16(1)
+    	blr
+
+    	/* Handle copies of 0~31 bytes.  */
+    	.align  4
+L(copy_LT_32):
+	cmpldi  cr6,5,8
+    	mr	12,4
+    	mtcrf   0x01,5
+    	ble	cr6,L(copy_LE_8)
+
+    	/* At least 9 bytes to go.  */
+    	neg	8,4
+    	clrrdi  11,4,2
+    	andi.   0,8,3
+    	cmpldi  cr1,5,16
+    	mr	10,5
+    	beq	L(copy_LT_32_aligned)
+
+    	/* Force 4-bytes alignment for SRC.  */
+    	mtocrf  0x01,0
+    	subf    10,0,5
+2:  	bf	30,1f
+
+    	lhz	6,0(12)
+    	addi    12,12,2
+    	sth	6,0(3)
+    	addi    3,3,2
+1:  	bf	31,L(end_4bytes_alignment)
+
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+
+    	.align  4
+L(end_4bytes_alignment):
+	cmpldi  cr1,10,16
+    	mtcrf   0x01,10
+
+L(copy_LT_32_aligned):
+	/* At least 6 bytes to go, and SRC is word-aligned.  */
+    	blt	cr1,8f
+
+    	/* Copy 16 bytes.  */
+    	lwz	6,0(12)
+    	lwz     7,4(12)
+    	stw     6,0(3)
+    	lwz     8,8(12)
+    	stw     7,4(3)
+    	lwz     6,12(12)
+    	addi    12,12,16
+    	stw     8,8(3)
+    	stw     6,12(3)
+    	addi    3,3,16
+8:  	/* Copy 8 bytes.  */
+    	bf	28,4f
+
+    	lwz     6,0(12)
+    	lwz     7,4(12)
+    	addi    12,12,8
+    	stw     6,0(3)
+    	stw     7,4(3)
+    	addi    3,3,8
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2-3 bytes.  */
+    	bf	30,1f
+
+    	lhz     6,0(12)
+    	sth     6,0(3)
+    	bf      31,0f
+    	lbz     7,2(12)
+    	stb     7,2(3)
+    	ld	3,-16(1)
+    	blr
+
+    	.align  4
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	ld	3,-16(1)
+    	blr
+
+    	/* Handles copies of 0~8 bytes.  */
+    	.align  4
+L(copy_LE_8):
+	bne	cr6,4f
+
+    	/* Though we could've used ld/std here, they are still
+    	slow for unaligned cases.  */
+
+    	lwz	6,0(4)
+    	lwz     7,4(4)
+    	stw     6,0(3)
+    	stw     7,4(3)
+    	ld      3,-16(1)      /* Return original DST pointer.  */
+    	blr
+
+    	.align  4
+4:  	/* Copies 4~7 bytes.  */
+    	bf	29,2b
+
+    	lwz	6,0(4)
+    	stw     6,0(3)
+    	bf      30,5f
+    	lhz     7,4(4)
+    	sth     7,4(3)
+    	bf      31,0f
+    	lbz     8,6(4)
+    	stb     8,6(3)
+    	ld	3,-16(1)
+    	blr
+
+    	.align  4
+5:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,4(4)
+    	stb	6,4(3)
+
+0:  	/* Return original DST pointer.  */
+    	ld	3,-16(1)
+    	blr
+
+    	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+    	SRC is not.  Use aligned quadword loads from SRC, shifted to realign
+    	the data, allowing for aligned DST stores.  */
+    	.align  4
+L(copy_GE_32_unaligned):
+	clrldi  0,0,60	      /* Number of bytes until the 1st
+    	    		      quadword.  */
+    	andi.   11,3,15       /* Check alignment of DST (against
+    	    		      quadwords).  */
+    	srdi    9,5,4	      /* Number of full quadwords remaining.  */
+
+    	beq	L(copy_GE_32_unaligned_cont)
+
+    	/* SRC is not quadword aligned, get it aligned.  */
+
+    	mtcrf   0x01,0
+    	subf    31,0,5
+
+    	/* Vector instructions work best when proper alignment (16-bytes)
+    	is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
+1:  	/* Copy 1 byte.  */
+    	bf	31,2f
+
+    	lbz	6,0(12)
+    	addi    12,12,1
+    	stb	6,0(3)
+    	addi    3,3,1
+2:  	/* Copy 2 bytes.  */
+    	bf	30,4f
+
+    	lhz     6,0(12)
+    	addi    12,12,2
+    	sth     6,0(3)
+    	addi    3,3,2
+4:  	/* Copy 4 bytes.  */
+    	bf	29,8f
+
+    	lwz     6,0(12)
+    	addi    12,12,4
+    	stw     6,0(3)
+    	addi    3,3,4
+8:  	/* Copy 8 bytes.  */
+    	bf	28,0f
+
+    	ld	6,0(12)
+    	addi    12,12,8
+    	std	6,0(3)
+    	addi    3,3,8
+0:
+    	clrldi  10,12,60      /* Check alignment of SRC.  */
+    	srdi    9,31,4	      /* Number of full quadwords remaining.  */
+
+    	/* The proper alignment is present, it is OK to copy the bytes now.  */
+L(copy_GE_32_unaligned_cont):
+
+    	/* Setup two indexes to speed up the indexed vector operations.  */
+    	clrldi  11,31,60
+    	li      6,16	      /* Index for 16-bytes offsets.  */
+    	li	7,32	      /* Index for 32-bytes offsets.  */
+    	cmpldi  cr1,11,0
+    	srdi    8,31,5	      /* Setup the loop counter.  */
+    	mr      10,3
+    	mr      11,12
+    	mtcrf   0x01,9
+    	cmpldi  cr6,9,1
+    	lvsl    5,0,12
+    	lvx     3,0,12
+    	bf      31,L(setup_unaligned_loop)
+
+    	/* Copy another 16 bytes so the main loop below works on 32-byte blocks.  */
+    	lvx     4,12,6
+    	vperm   6,3,4,5
+    	addi    11,12,16
+    	addi    10,3,16
+    	stvx    6,0,3
+    	vor	3,4,4
+
+L(setup_unaligned_loop):
+    	mtctr   8
+    	ble     cr6,L(end_unaligned_loop)
+
+    	/* Copy 32 bytes at a time using vector instructions.  */
+    	.align  4
+L(unaligned_loop):
+
+    	/* Note: vr6/vr10 may contain data that was already copied,
+    	but in order to get proper alignment, we may have to copy
+    	some portions again. This is faster than having unaligned
+    	vector instructions though.  */
+
+    	lvx	4,11,6	      /* vr4 = r11+16.  */
+    	vperm   6,3,4,5	      /* Merge the correctly-aligned portions
+    	    		      of vr3/vr4 into vr6.  */
+    	lvx	3,11,7	      /* vr3 = r11+32.  */
+    	vperm   10,4,3,5      /* Merge the correctly-aligned portions
+    	    		      of vr3/vr4 into vr10.  */
+    	addi    11,11,32
+    	stvx    6,0,10
+    	stvx    10,10,6
+    	addi    10,10,32
+
+    	bdnz	L(unaligned_loop)
+
+    	.align  4
+L(end_unaligned_loop):
+
+    	/* Check for tail bytes.  */
+    	rldicr  0,31,0,59
+    	mtcrf   0x01,31
+    	beq	cr1,0f
+
+    	add	3,3,0
+    	add	12,12,0
+
+    	/*  We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
+8:  	/* Copy 8 bytes.  */
+    	bf	28,4f
+
+    	lwz	6,0(12)
+    	lwz	7,4(12)
+    	addi    12,12,8
+    	stw	6,0(3)
+    	stw	7,4(3)
+    	addi    3,3,8
+4:  	/* Copy 4 bytes.  */
+    	bf	29,2f
+
+    	lwz	6,0(12)
+    	addi    12,12,4
+    	stw	6,0(3)
+    	addi    3,3,4
+2:  	/* Copy 2~3 bytes.  */
+    	bf	30,1f
+
+    	lhz	6,0(12)
+    	addi    12,12,2
+    	sth	6,0(3)
+    	addi    3,3,2
+1:  	/* Copy 1 byte.  */
+    	bf	31,0f
+
+    	lbz	6,0(12)
+    	stb	6,0(3)
+0:  	/* Return original DST pointer.  */
+    	ld	31,-8(1)
+    	ld	3,-16(1)
+    	blr
+
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)


