Patch for faster memcpy on MIPS

Jeff Johnston jjohnstn@redhat.com
Fri Dec 14 20:46:00 GMT 2012


On 12/11/2012 04:09 PM, Steve Ellcey wrote:
> On Tue, 2012-12-11 at 15:39 -0500, Jeff Johnston wrote:
>
>>
>> That would be perfect.  Please resubmit the patch with the updated licenses.
>>
>> -- Jeff J.
>
> Here is the updated patch.
>

Patch checked in.  Thanks.

> Steve Ellcey
> sellcey@mips.com
>
>
>
> 2012-12-11  Steve Ellcey  <sellcey@mips.com>
>
> 	* libc/machine/mips/memcpy.c: Remove.
> 	* libc/machine/mips/memcpy.S: New.
> 	* libc/machine/mips/Makefile.am (lib_a_CCASFLAGS): Add
> 	-D_COMPILING_NEWLIB.
> 	(lib_a_CFLAGS): Ditto.
>
> diff --git a/newlib/libc/machine/mips/Makefile.am b/newlib/libc/machine/mips/Makefile.am
> index 5a00534..16371df 100644
> --- a/newlib/libc/machine/mips/Makefile.am
> +++ b/newlib/libc/machine/mips/Makefile.am
> @@ -8,9 +8,9 @@ AM_CCASFLAGS = $(INCLUDES)
>
>   noinst_LIBRARIES = lib.a
>
> -lib_a_SOURCES = setjmp.S strlen.c strcmp.c strncpy.c memset.c memcpy.c
> -lib_a_CCASFLAGS=$(AM_CCASFLAGS)
> -lib_a_CFLAGS=$(AM_CFLAGS)
> +lib_a_SOURCES = setjmp.S strlen.c strcmp.c strncpy.c memset.c memcpy.S
> +lib_a_CCASFLAGS=$(AM_CCASFLAGS) -D_COMPILING_NEWLIB
> +lib_a_CFLAGS=$(AM_CFLAGS) -D_COMPILING_NEWLIB
>
>   ACLOCAL_AMFLAGS = -I ../../.. -I ../../../..
>   CONFIG_STATUS_DEPENDENCIES = $(newlib_basedir)/configure.host
> diff --git a/newlib/libc/machine/mips/Makefile.in b/newlib/libc/machine/mips/Makefile.in
> index 524a813..e92df0e 100644
> --- a/newlib/libc/machine/mips/Makefile.in
> +++ b/newlib/libc/machine/mips/Makefile.in
> @@ -175,9 +175,9 @@ AUTOMAKE_OPTIONS = cygnus
>   INCLUDES = $(NEWLIB_CFLAGS) $(CROSS_CFLAGS) $(TARGET_CFLAGS)
>   AM_CCASFLAGS = $(INCLUDES)
>   noinst_LIBRARIES = lib.a
> -lib_a_SOURCES = setjmp.S strlen.c strcmp.c strncpy.c memset.c memcpy.c
> -lib_a_CCASFLAGS = $(AM_CCASFLAGS)
> -lib_a_CFLAGS = $(AM_CFLAGS)
> +lib_a_SOURCES = setjmp.S strlen.c strcmp.c strncpy.c memset.c memcpy.S
> +lib_a_CCASFLAGS = $(AM_CCASFLAGS) -D_COMPILING_NEWLIB
> +lib_a_CFLAGS = $(AM_CFLAGS) -D_COMPILING_NEWLIB
>   ACLOCAL_AMFLAGS = -I ../../.. -I ../../../..
>   CONFIG_STATUS_DEPENDENCIES = $(newlib_basedir)/configure.host
>   all: all-am
> @@ -244,6 +244,12 @@ lib_a-setjmp.o: setjmp.S
>   lib_a-setjmp.obj: setjmp.S
>   	$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-setjmp.obj `if test -f 'setjmp.S'; then $(CYGPATH_W) 'setjmp.S'; else $(CYGPATH_W) '$(srcdir)/setjmp.S'; fi`
>
> +lib_a-memcpy.o: memcpy.S
> +	$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-memcpy.o `test -f 'memcpy.S' || echo '$(srcdir)/'`memcpy.S
> +
> +lib_a-memcpy.obj: memcpy.S
> +	$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-memcpy.obj `if test -f 'memcpy.S'; then $(CYGPATH_W) 'memcpy.S'; else $(CYGPATH_W) '$(srcdir)/memcpy.S'; fi`
> +
>   .c.o:
>   	$(COMPILE) -c $<
>
> @@ -274,12 +280,6 @@ lib_a-memset.o: memset.c
>   lib_a-memset.obj: memset.c
>   	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memset.obj `if test -f 'memset.c'; then $(CYGPATH_W) 'memset.c'; else $(CYGPATH_W) '$(srcdir)/memset.c'; fi`
>
> -lib_a-memcpy.o: memcpy.c
> -	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memcpy.o `test -f 'memcpy.c' || echo '$(srcdir)/'`memcpy.c
> -
> -lib_a-memcpy.obj: memcpy.c
> -	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memcpy.obj `if test -f 'memcpy.c'; then $(CYGPATH_W) 'memcpy.c'; else $(CYGPATH_W) '$(srcdir)/memcpy.c'; fi`
> -
>   ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
>   	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
>   	unique=`for i in $$list; do \
> diff --git a/newlib/libc/machine/mips/memcpy.S b/newlib/libc/machine/mips/memcpy.S
> new file mode 100644
> index 0000000..fe7cb15
> --- /dev/null
> +++ b/newlib/libc/machine/mips/memcpy.S
> @@ -0,0 +1,689 @@
> +/*
> + * Copyright (c) 2012
> + *      MIPS Technologies, Inc., California.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
> + *    contributors may be used to endorse or promote products derived from
> + *    this software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifdef ANDROID_CHANGES
> +#include "machine/asm.h"
> +#include "machine/regdef.h"
> +#define USE_MEMMOVE_FOR_OVERLAP
> +#define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
> +#define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
> +#elif _LIBC
> +#include <sysdep.h>
> +#include <regdef.h>
> +#include <sys/asm.h>
> +#define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
> +#define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
> +#elif _COMPILING_NEWLIB
> +#include "machine/asm.h"
> +#include "machine/regdef.h"
> +#define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
> +#define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
> +#else
> +#include <regdef.h>
> +#include <sys/asm.h>
> +#endif
> +
> +#if (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
> +    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
> +#ifndef DISABLE_PREFETCH
> +#define USE_PREFETCH
> +#endif
> +#endif
> +
> +#if (_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32)
> +#ifndef DISABLE_DOUBLE
> +#define USE_DOUBLE
> +#endif
> +#endif
> +
> +
> +
> +/* Some asm.h files do not have the L macro definition.  */
> +#ifndef L
> +#if _MIPS_SIM == _ABIO32
> +# define L(label) $L ## label
> +#else
> +# define L(label) .L ## label
> +#endif
> +#endif
> +
> +/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
> +#ifndef PTR_ADDIU
> +#ifdef USE_DOUBLE
> +#define PTR_ADDIU	daddiu
> +#else
> +#define PTR_ADDIU	addiu
> +#endif
> +#endif
> +
> +/* Some asm.h files do not have the PTR_SRA macro definition.  */
> +#ifndef PTR_SRA
> +#ifdef USE_DOUBLE
> +#define PTR_SRA		dsra
> +#else
> +#define PTR_SRA		sra
> +#endif
> +#endif
> +
> +
> +/*
> + * Using PREFETCH_HINT_LOAD_STREAMED instead of PREFETCH_LOAD on load
> + * prefetches appears to offer a slight performance advantage.
> + *
> + * Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
> + * or PREFETCH_STORE_STREAMED offers a large performance advantage
> + * but PREPAREFORSTORE has some special restrictions to consider.
> + *
> + * Prefetch with the 'prepare for store' hint does not copy a memory
> + * location into the cache; it just allocates a cache line and zeros
> + * it out.  This means that if you do not write to the entire cache
> + * line before it is written back to memory, the unwritten bytes are
> + * zeroed out and that data is lost.
> + *
> + * Also, if you are using this memcpy to copy overlapping buffers, it may
> + * not behave correctly when using the 'prepare for store' hint.  If you
> + * use the 'prepare for store' prefetch on a memory area that is in the
> + * memcpy source (as well as the memcpy destination), then you will get
> + * some data zeroed out before you have a chance to read it, and that
> + * data will be lost.
> + *
> + * If you are going to use this memcpy routine with the 'prepare for store'
> + * prefetch you may want to set USE_MEMMOVE_FOR_OVERLAP in order to avoid
> + * the problem of running memcpy on overlapping buffers.
> + *
> + * There are ifdef'ed sections of this memcpy to make sure that it does not
> + * do prefetches on cache lines that are not going to be completely written.
> + * This code is only needed and only used when PREFETCH_STORE_HINT is set to
> + * PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
> + * 32 bytes; if the cache line is larger, it will not work correctly.
> + */
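> +
> +/* Illustration of the hazard described above: with 32-byte cache lines, a
> + * 'prepare for store' prefetch of the line holding dst[96..127] allocates
> + * that line and zeros it.  If an overlapping source also occupies part of
> + * that range and has not been read yet, its bytes are destroyed before
> + * they can be copied.  */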
> +
> +#ifdef USE_PREFETCH
> +# define PREFETCH_HINT_LOAD		0
> +# define PREFETCH_HINT_STORE		1
> +# define PREFETCH_HINT_LOAD_STREAMED	4
> +# define PREFETCH_HINT_STORE_STREAMED	5
> +# define PREFETCH_HINT_LOAD_RETAINED	6
> +# define PREFETCH_HINT_STORE_RETAINED	7
> +# define PREFETCH_HINT_WRITEBACK_INVAL	25
> +# define PREFETCH_HINT_PREPAREFORSTORE	30
> +
> +/*
> + * If we have not picked the hints to use at this point, use the
> + * standard load and store prefetch hints.
> + */
> +#ifndef PREFETCH_STORE_HINT
> +# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
> +#endif
> +#ifndef PREFETCH_LOAD_HINT
> +# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD
> +#endif
> +
> +/*
> + * We double everything when USE_DOUBLE is true, so we do 2 prefetches to
> + * get 64 bytes in that case.  The assumption is that each individual
> + * prefetch brings in 32 bytes.
> + */
> +
> +#ifdef USE_DOUBLE
> +# define PREFETCH_CHUNK 64
> +# define PREFETCH_FOR_LOAD(chunk, reg) \
> + pref PREFETCH_LOAD_HINT, (chunk)*64(reg); \
> + pref PREFETCH_LOAD_HINT, ((chunk)*64)+32(reg)
> +# define PREFETCH_FOR_STORE(chunk, reg) \
> + pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
> + pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
> +#else
> +# define PREFETCH_CHUNK 32
> +# define PREFETCH_FOR_LOAD(chunk, reg) \
> + pref PREFETCH_LOAD_HINT, (chunk)*32(reg)
> +# define PREFETCH_FOR_STORE(chunk, reg) \
> + pref PREFETCH_STORE_HINT, (chunk)*32(reg)
> +#endif
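> +/* For example, without USE_DOUBLE and with PREFETCH_LOAD_HINT set to
> + * PREFETCH_HINT_LOAD_STREAMED (4), PREFETCH_FOR_LOAD (2, a1) expands to
> + * "pref 4, 64(a1)": a streamed-load prefetch of the chunk 64 bytes past
> + * a1.  */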
> +/* MAX_PREFETCH_SIZE is the maximum size of a prefetch; it must not be less
> + * than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
> + * of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
> + * hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
> + * used, then MAX_PREFETCH_SIZE does not matter.  */
> +#define MAX_PREFETCH_SIZE 128
> +/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
> + * than 5 on a STORE prefetch and that a single prefetch can never be larger
> + * than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
> + * we actually do two prefetches in that case, one 32 bytes after the other.  */
> +#ifdef USE_DOUBLE
> +# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
> +#else
> +# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
> +#endif
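> +/* Worked values: with USE_DOUBLE, PREFETCH_CHUNK is 64 and PREFETCH_LIMIT
> + * is (5 * 64) + 32 + 128 = 480; without it, PREFETCH_CHUNK is 32 and
> + * PREFETCH_LIMIT is (5 * 32) + 128 = 288.  */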
> +#if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
> +    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
> +/* We cannot handle this because the initial prefetches may fetch bytes that
> + * are before the buffer being copied.  We start copies with an offset
> + * of 4, so this situation must be avoided when using PREPAREFORSTORE.  */
> +#error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
> +#endif
> +#else /* USE_PREFETCH not defined */
> +# define PREFETCH_FOR_LOAD(offset, reg)
> +# define PREFETCH_FOR_STORE(offset, reg)
> +#endif
> +
> +/* Allow the routine to be named something else if desired.  */
> +#ifndef MEMCPY_NAME
> +#define MEMCPY_NAME memcpy
> +#endif
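> +/* For example, assembling with -DMEMCPY_NAME=__my_memcpy (a hypothetical
> + * name) would emit this routine under that symbol instead of memcpy.  */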
> +
> +/* We use these 32/64 bit registers as temporaries to do the copying.  */
> +#define REG0 t0
> +#define REG1 t1
> +#define REG2 t2
> +#define REG3 t3
> +#if _MIPS_SIM == _ABIO32
> +#  define REG4 t4
> +#  define REG5 t5
> +#  define REG6 t6
> +#  define REG7 t7
> +#else
> +#  define REG4 ta0
> +#  define REG5 ta1
> +#  define REG6 ta2
> +#  define REG7 ta3
> +#endif
> +
> +/* We load/store 64 bits at a time when USE_DOUBLE is true.
> + * The C_ prefix stands for CHUNK and is used to avoid macro name
> + * conflicts with system header files.  */
> +
> +#ifdef USE_DOUBLE
> +#  define C_ST	sd
> +#  define C_LD	ld
> +#if __MIPSEB
> +#  define C_LDHI	ldl	/* high part is left in big-endian	*/
> +#  define C_STHI	sdl	/* high part is left in big-endian	*/
> +#  define C_LDLO	ldr	/* low part is right in big-endian	*/
> +#  define C_STLO	sdr	/* low part is right in big-endian	*/
> +#else
> +#  define C_LDHI	ldr	/* high part is right in little-endian	*/
> +#  define C_STHI	sdr	/* high part is right in little-endian	*/
> +#  define C_LDLO	ldl	/* low part is left in little-endian	*/
> +#  define C_STLO	sdl	/* low part is left in little-endian	*/
> +#endif
> +#else
> +#  define C_ST	sw
> +#  define C_LD	lw
> +#if __MIPSEB
> +#  define C_LDHI	lwl	/* high part is left in big-endian	*/
> +#  define C_STHI	swl	/* high part is left in big-endian	*/
> +#  define C_LDLO	lwr	/* low part is right in big-endian	*/
> +#  define C_STLO	swr	/* low part is right in big-endian	*/
> +#else
> +#  define C_LDHI	lwr	/* high part is right in little-endian	*/
> +#  define C_STHI	swr	/* high part is right in little-endian	*/
> +#  define C_LDLO	lwl	/* low part is left in little-endian	*/
> +#  define C_STLO	swl	/* low part is left in little-endian	*/
> +#endif
> +#endif
> +
> +/* Bookkeeping values for 32 vs. 64 bit mode.  */
> +#ifdef USE_DOUBLE
> +#  define NSIZE 8
> +#  define NSIZEMASK 0x3f
> +#  define NSIZEDMASK 0x7f
> +#else
> +#  define NSIZE 4
> +#  define NSIZEMASK 0x1f
> +#  define NSIZEDMASK 0x3f
> +#endif
> +#define UNIT(unit) ((unit)*NSIZE)
> +#define UNITM1(unit) (((unit)*NSIZE)-1)
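> +/* For example, with USE_DOUBLE (NSIZE == 8), UNIT(3) is 24 and UNITM1(3)
> + * is 23; without it (NSIZE == 4), UNIT(3) is 12 and UNITM1(3) is 11.  */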
> +
> +#ifdef ANDROID_CHANGES
> +LEAF(MEMCPY_NAME, 0)
> +#else
> +LEAF(MEMCPY_NAME)
> +#endif
> +	.set	nomips16
> +	.set	noreorder
> +/*
> + * Below we handle the case where memcpy is called with overlapping src and dst.
> + * Although memcpy is not required to handle this case, some parts of Android,
> + * such as Skia, rely on such usage.  We call memmove to handle such cases.
> + */
> +#ifdef USE_MEMMOVE_FOR_OVERLAP
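> +/* The overlap test below computes |a0 - a1| without a branch: PTR_SRA
> + * fills t2 with the sign of the difference, and the xor/subtract pair
> + * negates the difference when it is negative.  If that distance is
> + * smaller than the byte count in a2, the buffers overlap and we
> + * tail-call memmove via t9.  */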
> +	PTR_SUBU t0,a0,a1
> +	PTR_SRA	t2,t0,31
> +	xor	t1,t0,t2
> +	PTR_SUBU t0,t1,t2
> +	sltu	t2,t0,a2
> +	beq	t2,zero,L(memcpy)
> +	la	t9,memmove
> +	jr	t9
> +	 nop
> +L(memcpy):
> +#endif
> +/*
> + * If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
> + * size, copy dst pointer to v0 for the return value.
> + */
> +	slti	t2,a2,(2 * NSIZE)
> +	bne	t2,zero,L(lastb)
> +#if defined(RETURN_FIRST_PREFETCH) || defined(RETURN_LAST_PREFETCH)
> +	move	v0,zero
> +#else
> +	move	v0,a0
> +#endif
> +/*
> + * If src and dst have different alignments, go to L(unaligned); if they
> + * have the same alignment (but are not actually aligned), do a partial
> + * load/store to make them aligned.  If they are both already aligned,
> + * we can start copying at L(aligned).
> + */
> +	xor	t8,a1,a0
> +	andi	t8,t8,(NSIZE-1)		/* t8 is a0/a1 word-displacement */
> +	bne	t8,zero,L(unaligned)
> +	PTR_SUBU a3, zero, a0
> +
> +	andi	a3,a3,(NSIZE-1)		/* copy a3 bytes to align a0/a1	  */
> +	beq	a3,zero,L(aligned)	/* if a3=0, it is already aligned */
> +	PTR_SUBU a2,a2,a3		/* a2 is the remaining bytes count */
> +
> +	C_LDHI	t8,0(a1)
> +	PTR_ADDU a1,a1,a3
> +	C_STHI	t8,0(a0)
> +	PTR_ADDU a0,a0,a3
> +
> +/*
> + * Now dst/src are both aligned to word (or double word) boundaries.
> + * Set a2 to count how many bytes we have to copy after all the 64/128 byte
> + * chunks are copied and a3 to the dst pointer after all the 64/128 byte
> + * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
> + * equals a3.
> + */
> +
> +L(aligned):
> +	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
> +	beq	a2,t8,L(chkw)	 /* if a2==t8, no 64-byte/128-byte chunks */
> +	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
> +	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
> +
> +/* When in the loop we may prefetch with the 'prepare to store' hint;
> + * in this case a0+x should not be past the "t0-32" address.  This
> + * means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
> + * for x=64 the last "safe" a0 address is "t0-96".  In the current version we
> + * will use "prefetch hint,128(a0)", so "t0-160" is the limit.
> + */
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
> +	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address */
> +	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address */
> +#endif
> +	PREFETCH_FOR_LOAD  (0, a1)
> +	PREFETCH_FOR_LOAD  (1, a1)
> +	PREFETCH_FOR_LOAD  (2, a1)
> +	PREFETCH_FOR_LOAD  (3, a1)
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
> +	PREFETCH_FOR_STORE (1, a0)
> +	PREFETCH_FOR_STORE (2, a0)
> +	PREFETCH_FOR_STORE (3, a0)
> +#endif
> +#if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
> +#if PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE
> +	sltu    v1,t9,a0
> +	bgtz    v1,L(skip_set)
> +	nop
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
> +L(skip_set):
> +#else
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
> +#endif
> +#endif
> +#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH) \
> +    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*3)
> +#ifdef USE_DOUBLE
> +	PTR_ADDIU v0,v0,32
> +#endif
> +#endif
> +L(loop16w):
> +	C_LD	t0,UNIT(0)(a1)
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
> +	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch */
> +	bgtz	v1,L(skip_pref)
> +#endif
> +	C_LD	t1,UNIT(1)(a1)
> +	PREFETCH_FOR_STORE (4, a0)
> +	PREFETCH_FOR_STORE (5, a0)
> +#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH)
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*5)
> +#ifdef USE_DOUBLE
> +	PTR_ADDIU v0,v0,32
> +#endif
> +#endif
> +L(skip_pref):
> +	C_LD	REG2,UNIT(2)(a1)
> +	C_LD	REG3,UNIT(3)(a1)
> +	C_LD	REG4,UNIT(4)(a1)
> +	C_LD	REG5,UNIT(5)(a1)
> +	C_LD	REG6,UNIT(6)(a1)
> +	C_LD	REG7,UNIT(7)(a1)
> +        PREFETCH_FOR_LOAD (4, a1)
> +
> +	C_ST	t0,UNIT(0)(a0)
> +	C_ST	t1,UNIT(1)(a0)
> +	C_ST	REG2,UNIT(2)(a0)
> +	C_ST	REG3,UNIT(3)(a0)
> +	C_ST	REG4,UNIT(4)(a0)
> +	C_ST	REG5,UNIT(5)(a0)
> +	C_ST	REG6,UNIT(6)(a0)
> +	C_ST	REG7,UNIT(7)(a0)
> +
> +	C_LD	t0,UNIT(8)(a1)
> +	C_LD	t1,UNIT(9)(a1)
> +	C_LD	REG2,UNIT(10)(a1)
> +	C_LD	REG3,UNIT(11)(a1)
> +	C_LD	REG4,UNIT(12)(a1)
> +	C_LD	REG5,UNIT(13)(a1)
> +	C_LD	REG6,UNIT(14)(a1)
> +	C_LD	REG7,UNIT(15)(a1)
> +        PREFETCH_FOR_LOAD (5, a1)
> +	C_ST	t0,UNIT(8)(a0)
> +	C_ST	t1,UNIT(9)(a0)
> +	C_ST	REG2,UNIT(10)(a0)
> +	C_ST	REG3,UNIT(11)(a0)
> +	C_ST	REG4,UNIT(12)(a0)
> +	C_ST	REG5,UNIT(13)(a0)
> +	C_ST	REG6,UNIT(14)(a0)
> +	C_ST	REG7,UNIT(15)(a0)
> +	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
> +	bne	a0,a3,L(loop16w)
> +	PTR_ADDIU a1,a1,UNIT(16)	/* adding 64/128 to src */
> +	move	a2,t8
> +
> +/* Here we have src and dest word-aligned and less than 64 (or 128)
> + * bytes to go.  Check for a 32 (or 64) byte chunk and copy it if there
> + * is one.  Otherwise jump down to L(chk1w) to handle the tail end of
> + * the copy.
> + */
> +
> +L(chkw):
> +	PREFETCH_FOR_LOAD (0, a1)
> +	andi	t8,a2,NSIZEMASK	/* Is there a 32-byte/64-byte chunk?  */
> +				/* t8 is the remainder count past 32 bytes */
> +	beq	a2,t8,L(chk1w)	/* When a2=t8, no 32-byte chunk  */
> +	nop
> +	C_LD	t0,UNIT(0)(a1)
> +	C_LD	t1,UNIT(1)(a1)
> +	C_LD	REG2,UNIT(2)(a1)
> +	C_LD	REG3,UNIT(3)(a1)
> +	C_LD	REG4,UNIT(4)(a1)
> +	C_LD	REG5,UNIT(5)(a1)
> +	C_LD	REG6,UNIT(6)(a1)
> +	C_LD	REG7,UNIT(7)(a1)
> +	PTR_ADDIU a1,a1,UNIT(8)
> +	C_ST	t0,UNIT(0)(a0)
> +	C_ST	t1,UNIT(1)(a0)
> +	C_ST	REG2,UNIT(2)(a0)
> +	C_ST	REG3,UNIT(3)(a0)
> +	C_ST	REG4,UNIT(4)(a0)
> +	C_ST	REG5,UNIT(5)(a0)
> +	C_ST	REG6,UNIT(6)(a0)
> +	C_ST	REG7,UNIT(7)(a0)
> +	PTR_ADDIU a0,a0,UNIT(8)
> +
> +/*
> + * Here we have less than 32 (or 64) bytes to copy.  Set up for a loop to
> + * copy one word (or double word) at a time.  Set a2 to count how many
> + * bytes we have to copy after all the word (or double word) chunks are
> + * copied and a3 to the dst pointer after all the (d)word chunks have
> + * been copied.  We will loop, incrementing a0 and a1 until a0 equals a3.
> + */
> +L(chk1w):
> +	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past the (d)word chunks */
> +	beq	a2,t8,L(lastb)
> +	PTR_SUBU a3,t8,a2	/* a3 is the count of bytes in (d)word chunks */
> +	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
> +
> +/* copying in words (4-byte or 8-byte chunks) */
> +L(wordCopy_loop):
> +	C_LD	REG3,UNIT(0)(a1)
> +	PTR_ADDIU a0,a0,UNIT(1)
> +	PTR_ADDIU a1,a1,UNIT(1)
> +	bne	a0,a3,L(wordCopy_loop)
> +	C_ST	REG3,UNIT(-1)(a0)
> +
> +/* Copy the last 8 (or 16) bytes */
> +L(lastb):
> +	blez	a2,L(leave)
> +	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
> +L(lastbloop):
> +	lb	v1,0(a1)
> +	PTR_ADDIU a0,a0,1
> +	PTR_ADDIU a1,a1,1
> +	bne	a0,a3,L(lastbloop)
> +	sb	v1,-1(a0)
> +L(leave):
> +	j	ra
> +	nop
> +/*
> + * UNALIGNED case; we got here with a3 = "negu a0".
> + * This code is nearly identical to the aligned code above,
> + * but only the destination (not the source) gets aligned,
> + * so we need to do partial loads of the source followed
> + * by normal stores to the destination (once we have aligned
> + * the destination).
> + */
> +
> +L(unaligned):
> +	andi	a3,a3,(NSIZE-1)	/* copy a3 bytes to align a0/a1 */
> +	beqz	a3,L(ua_chk16w) /* if a3=0, it is already aligned */
> +	PTR_SUBU a2,a2,a3	/* a2 is the remaining bytes count */
> +
> +	C_LDHI	v1,UNIT(0)(a1)
> +	C_LDLO	v1,UNITM1(1)(a1)
> +	PTR_ADDU a1,a1,a3
> +	C_STHI	v1,UNIT(0)(a0)
> +	PTR_ADDU a0,a0,a3
> +
> +/*
> + * Now the destination (but not the source) is aligned.
> + * Set a2 to count how many bytes we have to copy after all the 64/128 byte
> + * chunks are copied and a3 to the dst pointer after all the 64/128 byte
> + * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
> + * equals a3.
> + */
> +
> +L(ua_chk16w):
> +	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
> +	beq	a2,t8,L(ua_chkw) /* if a2==t8, no 64-byte/128-byte chunks */
> +	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
> +	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
> +
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
> +	PTR_ADDU t0,a0,a2	  /* t0 is the "past the end" address */
> +	PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
> +#endif
> +	PREFETCH_FOR_LOAD  (0, a1)
> +	PREFETCH_FOR_LOAD  (1, a1)
> +	PREFETCH_FOR_LOAD  (2, a1)
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
> +	PREFETCH_FOR_STORE (1, a0)
> +	PREFETCH_FOR_STORE (2, a0)
> +	PREFETCH_FOR_STORE (3, a0)
> +#endif
> +#if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
> +#if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
> +	sltu    v1,t9,a0
> +	bgtz    v1,L(ua_skip_set)
> +	nop
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
> +L(ua_skip_set):
> +#else
> +	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
> +#endif
> +#endif
> +L(ua_loop16w):
> +	PREFETCH_FOR_LOAD  (3, a1)
> +	C_LDHI	t0,UNIT(0)(a1)
> +	C_LDHI	t1,UNIT(1)(a1)
> +	C_LDHI	REG2,UNIT(2)(a1)
> +#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
> +	sltu	v1,t9,a0
> +	bgtz	v1,L(ua_skip_pref)
> +#endif
> +	C_LDHI	REG3,UNIT(3)(a1)
> +	PREFETCH_FOR_STORE (4, a0)
> +	PREFETCH_FOR_STORE (5, a0)
> +L(ua_skip_pref):
> +	C_LDHI	REG4,UNIT(4)(a1)
> +	C_LDHI	REG5,UNIT(5)(a1)
> +	C_LDHI	REG6,UNIT(6)(a1)
> +	C_LDHI	REG7,UNIT(7)(a1)
> +	C_LDLO	t0,UNITM1(1)(a1)
> +	C_LDLO	t1,UNITM1(2)(a1)
> +	C_LDLO	REG2,UNITM1(3)(a1)
> +	C_LDLO	REG3,UNITM1(4)(a1)
> +	C_LDLO	REG4,UNITM1(5)(a1)
> +	C_LDLO	REG5,UNITM1(6)(a1)
> +	C_LDLO	REG6,UNITM1(7)(a1)
> +	C_LDLO	REG7,UNITM1(8)(a1)
> +        PREFETCH_FOR_LOAD (4, a1)
> +	C_ST	t0,UNIT(0)(a0)
> +	C_ST	t1,UNIT(1)(a0)
> +	C_ST	REG2,UNIT(2)(a0)
> +	C_ST	REG3,UNIT(3)(a0)
> +	C_ST	REG4,UNIT(4)(a0)
> +	C_ST	REG5,UNIT(5)(a0)
> +	C_ST	REG6,UNIT(6)(a0)
> +	C_ST	REG7,UNIT(7)(a0)
> +	C_LDHI	t0,UNIT(8)(a1)
> +	C_LDHI	t1,UNIT(9)(a1)
> +	C_LDHI	REG2,UNIT(10)(a1)
> +	C_LDHI	REG3,UNIT(11)(a1)
> +	C_LDHI	REG4,UNIT(12)(a1)
> +	C_LDHI	REG5,UNIT(13)(a1)
> +	C_LDHI	REG6,UNIT(14)(a1)
> +	C_LDHI	REG7,UNIT(15)(a1)
> +	C_LDLO	t0,UNITM1(9)(a1)
> +	C_LDLO	t1,UNITM1(10)(a1)
> +	C_LDLO	REG2,UNITM1(11)(a1)
> +	C_LDLO	REG3,UNITM1(12)(a1)
> +	C_LDLO	REG4,UNITM1(13)(a1)
> +	C_LDLO	REG5,UNITM1(14)(a1)
> +	C_LDLO	REG6,UNITM1(15)(a1)
> +	C_LDLO	REG7,UNITM1(16)(a1)
> +        PREFETCH_FOR_LOAD (5, a1)
> +	C_ST	t0,UNIT(8)(a0)
> +	C_ST	t1,UNIT(9)(a0)
> +	C_ST	REG2,UNIT(10)(a0)
> +	C_ST	REG3,UNIT(11)(a0)
> +	C_ST	REG4,UNIT(12)(a0)
> +	C_ST	REG5,UNIT(13)(a0)
> +	C_ST	REG6,UNIT(14)(a0)
> +	C_ST	REG7,UNIT(15)(a0)
> +	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
> +	bne	a0,a3,L(ua_loop16w)
> +	PTR_ADDIU a1,a1,UNIT(16)	/* adding 64/128 to src */
> +	move	a2,t8
> +
> +/* Here we have the dst (but not the src) word-aligned and less than
> + * 64 (or 128) bytes to go.  Check for a 32 (or 64) byte chunk and copy
> + * it if there is one.  Otherwise jump down to L(ua_chk1w) to handle the
> + * tail end of the copy.  */
> +
> +L(ua_chkw):
> +	PREFETCH_FOR_LOAD (0, a1)
> +	andi	t8,a2,NSIZEMASK	  /* Is there a 32-byte/64-byte chunk?  */
> +				  /* t8 is the remainder count past 32 bytes */
> +	beq	a2,t8,L(ua_chk1w) /* When a2=t8, no 32-byte chunk */
> +	nop
> +	C_LDHI	t0,UNIT(0)(a1)
> +	C_LDHI	t1,UNIT(1)(a1)
> +	C_LDHI	REG2,UNIT(2)(a1)
> +	C_LDHI	REG3,UNIT(3)(a1)
> +	C_LDHI	REG4,UNIT(4)(a1)
> +	C_LDHI	REG5,UNIT(5)(a1)
> +	C_LDHI	REG6,UNIT(6)(a1)
> +	C_LDHI	REG7,UNIT(7)(a1)
> +	C_LDLO	t0,UNITM1(1)(a1)
> +	C_LDLO	t1,UNITM1(2)(a1)
> +	C_LDLO	REG2,UNITM1(3)(a1)
> +	C_LDLO	REG3,UNITM1(4)(a1)
> +	C_LDLO	REG4,UNITM1(5)(a1)
> +	C_LDLO	REG5,UNITM1(6)(a1)
> +	C_LDLO	REG6,UNITM1(7)(a1)
> +	C_LDLO	REG7,UNITM1(8)(a1)
> +	PTR_ADDIU a1,a1,UNIT(8)
> +	C_ST	t0,UNIT(0)(a0)
> +	C_ST	t1,UNIT(1)(a0)
> +	C_ST	REG2,UNIT(2)(a0)
> +	C_ST	REG3,UNIT(3)(a0)
> +	C_ST	REG4,UNIT(4)(a0)
> +	C_ST	REG5,UNIT(5)(a0)
> +	C_ST	REG6,UNIT(6)(a0)
> +	C_ST	REG7,UNIT(7)(a0)
> +	PTR_ADDIU a0,a0,UNIT(8)
> +/*
> + * Here we have less than 32 (or 64) bytes to copy.  Set up for a loop to
> + * copy one word (or double word) at a time.
> + */
> +L(ua_chk1w):
> +	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past the (d)word chunks */
> +	beq	a2,t8,L(ua_smallCopy)
> +	PTR_SUBU a3,t8,a2	/* a3 is the count of bytes in (d)word chunks */
> +	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
> +
> +/* copying in words (4-byte or 8-byte chunks) */
> +L(ua_wordCopy_loop):
> +	C_LDHI	v1,UNIT(0)(a1)
> +	C_LDLO	v1,UNITM1(1)(a1)
> +	PTR_ADDIU a0,a0,UNIT(1)
> +	PTR_ADDIU a1,a1,UNIT(1)
> +	bne	a0,a3,L(ua_wordCopy_loop)
> +	C_ST	v1,UNIT(-1)(a0)
> +
> +/* Copy the last 8 (or 16) bytes */
> +L(ua_smallCopy):
> +	beqz	a2,L(leave)
> +	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
> +L(ua_smallCopy_loop):
> +	lb	v1,0(a1)
> +	PTR_ADDIU a0,a0,1
> +	PTR_ADDIU a1,a1,1
> +	bne	a0,a3,L(ua_smallCopy_loop)
> +	sb	v1,-1(a0)
> +
> +	j	ra
> +	nop
> +
> +	.set	at
> +	.set	reorder
> +END(MEMCPY_NAME)
> +#ifndef ANDROID_CHANGES
> +#ifdef _LIBC
> +libc_hidden_builtin_def (MEMCPY_NAME)
> +#endif
> +#endif
> diff --git a/newlib/libc/machine/mips/memcpy.c b/newlib/libc/machine/mips/memcpy.c
> deleted file mode 100644
> index 761f7e9..0000000
> --- a/newlib/libc/machine/mips/memcpy.c
> +++ /dev/null
> @@ -1,164 +0,0 @@
> -/*
> -FUNCTION
> -        <<memcpy>>---copy memory regions, optimized for the mips processors
> -
> -ANSI_SYNOPSIS
> -        #include <string.h>
> -        void* memcpy(void *<[out]>, const void *<[in]>, size_t <[n]>);
> -
> -TRAD_SYNOPSIS
> -        void *memcpy(<[out]>, <[in]>, <[n]>
> -        void *<[out]>;
> -        void *<[in]>;
> -        size_t <[n]>;
> -
> -DESCRIPTION
> -        This function copies <[n]> bytes from the memory region
> -        pointed to by <[in]> to the memory region pointed to by
> -        <[out]>.
> -
> -        If the regions overlap, the behavior is undefined.
> -
> -RETURNS
> -        <<memcpy>> returns a pointer to the first byte of the <[out]>
> -        region.
> -
> -PORTABILITY
> -<<memcpy>> is ANSI C.
> -
> -<<memcpy>> requires no supporting OS subroutines.
> -
> -QUICKREF
> -        memcpy ansi pure
> -	*/
> -
> -#include <_ansi.h>
> -#include <stddef.h>
> -#include <limits.h>
> -
> -#ifdef __mips64
> -#define wordtype long long
> -#else
> -#define wordtype long
> -#endif
> -
> -/* Nonzero if either X or Y is not aligned on a "long" boundary.  */
> -#define UNALIGNED(X, Y) \
> -  (((long)X & (sizeof (wordtype) - 1)) | ((long)Y & (sizeof (wordtype) - 1)))
> -
> -/* How many bytes are copied each iteration of the 4X unrolled loop.  */
> -#define BIGBLOCKSIZE    (sizeof (wordtype) << 2)
> -
> -/* How many bytes are copied each iteration of the word copy loop.  */
> -#define LITTLEBLOCKSIZE (sizeof (wordtype))
> -
> -/* Threshhold for punting to the byte copier.  */
> -#define TOO_SMALL(LEN)  ((LEN) < BIGBLOCKSIZE)
> -
> -_PTR
> -_DEFUN (memcpy, (dst0, src0, len0),
> -	_PTR dst0 _AND
> -	_CONST _PTR src0 _AND
> -	size_t len0)
> -{
> -#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) || defined(__mips16)
> -  char *dst = (char *) dst0;
> -  char *src = (char *) src0;
> -
> -  _PTR save = dst0;
> -
> -  while (len0--)
> -    {
> -      *dst++ = *src++;
> -    }
> -
> -  return save;
> -#else
> -  char *dst = dst0;
> -  _CONST char *src = src0;
> -  wordtype *aligned_dst;
> -  _CONST wordtype *aligned_src;
> -  int   len =  len0;
> -  size_t iter;
> -
> -  /* Handle aligned moves here.  */
> -  if (!UNALIGNED (src, dst))
> -    {
> -      iter = len / BIGBLOCKSIZE;
> -      len = len % BIGBLOCKSIZE;
> -      aligned_dst = (wordtype *)dst;
> -      aligned_src = (wordtype *)src;
> -
> -	  /* Copy 4X long or long long words at a time if possible.  */
> -      while (iter > 0)
> -	{
> -	  wordtype tmp0 = aligned_src[0];
> -	  wordtype tmp1 = aligned_src[1];
> -	  wordtype tmp2 = aligned_src[2];
> -	  wordtype tmp3 = aligned_src[3];
> -
> -	  aligned_dst[0] = tmp0;
> -	  aligned_dst[1] = tmp1;
> -	  aligned_dst[2] = tmp2;
> -	  aligned_dst[3] = tmp3;
> -	  aligned_src += 4;
> -	  aligned_dst += 4;
> -	  iter--;
> -	}
> -
> -      /* Copy one long or long long word at a time if possible.  */
> -      iter = len / LITTLEBLOCKSIZE;
> -      len = len % LITTLEBLOCKSIZE;
> -
> -      while (iter > 0)
> -	{
> -	  *aligned_dst++ = *aligned_src++;
> -	  iter--;
> -	}
> -
> -      /* Pick up any residual with a byte copier.  */
> -      dst = (char*)aligned_dst;
> -      src = (char*)aligned_src;
> -
> -      while (len > 0)
> -	{
> -	  *dst++ = *src++;
> -	  len--;
> -	}
> -
> -      return dst0;
> -    }
> -
> -  /* Handle unaligned moves here, using lwr/lwl and swr/swl where possible */
> -  else
> -    {
> -#ifndef NO_UNALIGNED_LOADSTORE
> -      int tmp;
> -      int *int_src = (int *)src;
> -      int *int_dst = (int *)dst;
> -      iter = len / 4;
> -      len = len % 4;
> -      while (iter > 0)
> -	{
> -	  __asm__ ("ulw %0,%1" : "=r" (tmp) : "m" (*int_src));
> -	  iter--;
> -	  int_src++;
> -	  __asm__ ("usw %1,%0" : "=m" (*int_dst) : "r" (tmp));
> -	  int_dst++;
> -	}
> -
> -      /* Pick up any residual with a byte copier.  */
> -      dst = (char*)int_dst;
> -      src = (char*)int_src;
> -#endif
> -
> -      while (len > 0)
> -	{
> -	  *dst++ = *src++;
> -	  len--;
> -	}
> -
> -      return dst0;
> -    }
> -#endif /* not PREFER_SIZE_OVER_SPEED */
> -}