[PATCH 03/21] Add boot code for MIPS architecture

Aleksandar Rikalo <arikalo@gmail.com>
Thu Oct 31 05:49:19 GMT 2024


From: Jaydeep Patil <jaydeep.patil@imgtec.com>
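
Add boot code for bare-metal MIPS targets: CP0, L1 cache, L2/L3 cache
(both Config2-style and CM/GCR-managed L2) and TLB initialization, plus
reset handlers and a boot-code linker script.  Each step comes in two
variants: a generic one that probes the Config registers at run time,
and a "predef" one specialized at build time from predefined
configuration values (boot/predef.h), with __core_check verifying at
boot that the hardware matches the predefined configuration.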

Signed-off-by: Jaydeep Patil <jaydeep.patil@imgtec.com>
Signed-off-by: Aleksandar Rikalo <arikalo@gmail.com>
---
 libgloss/mips/boot/corecheck_predef.S      | 229 ++++++++++++++
 libgloss/mips/boot/init_caches.S           | 180 +++++++++++
 libgloss/mips/boot/init_caches_predef.S    | 185 +++++++++++
 libgloss/mips/boot/init_cm3l2.S            | 147 +++++++++
 libgloss/mips/boot/init_cm3l2_predef.S     | 125 ++++++++
 libgloss/mips/boot/init_cp0.S              | 107 +++++++
 libgloss/mips/boot/init_cp0_predef.S       | 133 ++++++++
 libgloss/mips/boot/init_l23caches.S        | 143 +++++++++
 libgloss/mips/boot/init_l23caches_predef.S | 163 ++++++++++
 libgloss/mips/boot/init_tlb.S              | 350 +++++++++++++++++++++
 libgloss/mips/boot/init_tlb_predef.S       | 151 +++++++++
 libgloss/mips/boot/predef.h                | 155 +++++++++
 libgloss/mips/boot/reset.S                 | 224 +++++++++++++
 libgloss/mips/boot/reset_predef.S          | 245 +++++++++++++++
 libgloss/mips/bootcode.ld                  |  14 +
 15 files changed, 2551 insertions(+)
 create mode 100644 libgloss/mips/boot/corecheck_predef.S
 create mode 100644 libgloss/mips/boot/init_caches.S
 create mode 100644 libgloss/mips/boot/init_caches_predef.S
 create mode 100644 libgloss/mips/boot/init_cm3l2.S
 create mode 100644 libgloss/mips/boot/init_cm3l2_predef.S
 create mode 100644 libgloss/mips/boot/init_cp0.S
 create mode 100644 libgloss/mips/boot/init_cp0_predef.S
 create mode 100644 libgloss/mips/boot/init_l23caches.S
 create mode 100644 libgloss/mips/boot/init_l23caches_predef.S
 create mode 100644 libgloss/mips/boot/init_tlb.S
 create mode 100644 libgloss/mips/boot/init_tlb_predef.S
 create mode 100644 libgloss/mips/boot/predef.h
 create mode 100644 libgloss/mips/boot/reset.S
 create mode 100644 libgloss/mips/boot/reset_predef.S
 create mode 100644 libgloss/mips/bootcode.ld

diff --git a/libgloss/mips/boot/corecheck_predef.S b/libgloss/mips/boot/corecheck_predef.S
new file mode 100644
index 000000000..c7bafc10d
--- /dev/null
+++ b/libgloss/mips/boot/corecheck_predef.S
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2015-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _RESETCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include <mips/cm3.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+/* Config 0 has some RO fields. */
+#if defined (C0_CONFIG0_VALUE)
+#define C0_CONFIG0_RO	(CFG0_M | CFG0_AT_MASK | CFG0_AR_MASK | \
+			CFG0_MT_MASK | CFG0_VI)
+#define C0_CONFIG0_EXP	(C0_CONFIG0_RO & C0_CONFIG0_VALUE)
+#endif
+
+/* Config 1 has no RW fields. */
+#if defined(C0_CONFIG1_VALUE)
+#define C0_CONFIG1_RO (0xffffffff)
+#define C0_CONFIG1_EXP	(C0_CONFIG1_RO & C0_CONFIG1_VALUE)
+#endif
+
+/* Config 2 has 2 RW fields. */
+#if defined(C0_CONFIG2_VALUE)
+#define C0_CONFIG2_RO ~(CFG2_TU_MASK | CFG2_SU_MASK)
+#define C0_CONFIG2_EXP	(C0_CONFIG2_RO & C0_CONFIG2_VALUE)
+#endif
+
+/* Config 3 has only 1 R/W bit (microMIPS on exception) */
+#if defined(C0_CONFIG3_VALUE)
+#define C0_CONFIG3_RO	(~CFG3_IOE)
+#define C0_CONFIG3_EXP	(C0_CONFIG3_RO & C0_CONFIG3_VALUE)
+#endif
+
+
+/* Config 4 has only 1 field R/W (FTLB page size) */
+#if defined(C0_CONFIG4_VALUE)
+#define C0_CONFIG4_RO	(~CFG4_FTLBPS_MASK)
+#define C0_CONFIG4_EXP	(C0_CONFIG4_RO & C0_CONFIG4_VALUE)
+#endif
+
+/* Config 5 has only a few fields and some of them are RO. */
+#if defined(C0_CONFIG5_VALUE)
+#define C0_CONFIG5_RO	(CFG5_MVH | CFG5_LLB | CFG5_MRP | CFG5_NF)
+#define C0_CONFIG5_EXP	(C0_CONFIG5_RO & C0_CONFIG5_VALUE)
+#endif
+
+#if defined(C0_CMGCRBASE_VALUE)
+#define C0_CMGCRBASE_ADDR ((C0_CMGCRBASE_VALUE << 4) | (0xb << 28))
+#define C0_CMGCRBASE_RO	(0xffffffff)
+#define C0_CMGCRBASE_EXP C0_CMGCRBASE_VALUE
+#endif
+
+/* GCR L2 config has REG_EXISTS and L2 config as read-only, nonzero fields. */
+#if defined(GCR_L2_CONFIG_VALUE)
+#define GCR_L2_CONFIG_RO ((1<<31) | ((1<<16) - 1))
+#define GCR_L2_CONFIG_EXP GCR_L2_CONFIG_VALUE
+#endif
+
+LEAF(__core_check)
+	/*
+	 * Compare the expected value to the RO fields
+	 * of the config register
+	 */
+#if defined(C0_CONFIG0_VALUE)
+	li	t0, C0_CONFIG0_EXP
+	li	t1, C0_CONFIG0_RO
+	mfc0	t3, C0_CONFIG
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CONFIG1_VALUE)
+	li	t0, C0_CONFIG1_EXP
+	li	t1, C0_CONFIG1_RO
+	mfc0	t3, C0_CONFIG1
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CONFIG2_VALUE)
+	li	t0, C0_CONFIG2_EXP
+	li	t1, C0_CONFIG2_RO
+	mfc0	t3, C0_CONFIG2
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CONFIG3_VALUE)
+	li	t0, C0_CONFIG3_EXP
+	li	t1, C0_CONFIG3_RO
+	mfc0	t3, C0_CONFIG3
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CONFIG4_VALUE)
+	li	t0, C0_CONFIG4_EXP
+	li	t1, C0_CONFIG4_RO
+	mfc0	t3, C0_CONFIG4
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CONFIG5_VALUE)
+	li	t0, C0_CONFIG5_EXP
+	li	t1, C0_CONFIG5_RO
+	mfc0	t3, C0_CONFIG5
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+
+#if defined(C0_CMGCRBASE_VALUE)
+	li	t0, C0_CMGCRBASE_EXP
+	li	t1, C0_CMGCRBASE_RO
+	mfc0	t3, C0_CMGCRBASE
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#if defined(GCR_L2_CONFIG_VALUE)
+	li	t0, GCR_L2_CONFIG_EXP
+	li	t1, GCR_L2_CONFIG_RO
+	li	t2, C0_CMGCRBASE_ADDR
+	lw	t3, GCR_L2_CONFIG(t2)
+	and	t2, t1, t3
+	bne	t2, t0, 1f
+#endif
+#endif
+
+#if defined(C0_WATCHHI_VALUE)
+	mfc0	t0, C0_WATCHHI, 0
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI1_VALUE)
+	mfc0	t0, C0_WATCHHI, 1
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI1_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI2_VALUE)
+	mfc0	t0, C0_WATCHHI, 2
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI2_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI3_VALUE)
+	mfc0	t0, C0_WATCHHI, 3
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI3_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI4_VALUE)
+	mfc0	t0, C0_WATCHHI, 4
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI4_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI5_VALUE)
+	mfc0	t0, C0_WATCHHI, 5
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI5_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+#if defined(C0_WATCHHI6_VALUE)
+	mfc0	t0, C0_WATCHHI, 6
+	ext	t0, t0, WATCHHI_M_SHIFT, 1
+
+	li	t1, ((C0_WATCHHI6_VALUE & WATCHHI_M) >> WATCHHI_M_SHIFT)
+	bne	t0, t1, 1f
+#endif
+
+	b	2f
+1:
+	/* Incorrect config supplied, report a boot failure through UHI */
+	li	t9, 23
+	/* Reason - Predef config incorrect */
+	li	a0, 2
+	/* Trigger the UHI operation */
+	sdbbp	1
+	/* In case a debugger corrects this failure */
+
+2:
+	jr	ra
+
+END(__core_check)
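
Each predefined-config check above reduces to masking the read-only
bits and comparing them with the build-time constant.  A minimal C
sketch of one such check (read_c0_config0() and boot_fail() are
hypothetical stand-ins for the mfc0 and UHI sdbbp sequences):

    /* Compare only the read-only bits of Config0 with the predefined
       value; writable bits are ignored. */
    uint32_t ro_mask  = C0_CONFIG0_RO;
    uint32_t expected = ro_mask & C0_CONFIG0_VALUE;   /* C0_CONFIG0_EXP */
    if ((read_c0_config0() & ro_mask) != expected)
        boot_fail(2);   /* UHI op 23, reason 2: predef config incorrect */
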
diff --git a/libgloss/mips/boot/init_caches.S b/libgloss/mips/boot/init_caches.S
new file mode 100644
index 000000000..6377813ae
--- /dev/null
+++ b/libgloss/mips/boot/init_caches.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+
+MIPS_NOMIPS16
+
+#define LINE_SIZE     vt0
+#define SET_SIZE      vt1
+#define ASSOC	      a0
+#define TOTAL_BYTES   a1
+#define CURR_ADDR1    a2
+#define CURR_ADDR2    a3
+#define END_ADDR      t0
+#define CONFIG	      t1
+#define CONFIG1	      t2
+#define TEMP	      t3
+
+    .set    noat
+
+/*
+ * init_icache invalidates all instruction cache entries
+ */
+
+LEAF(__init_icache)
+	mfc0	CONFIG1, C0_CONFIG1
+
+	ext	LINE_SIZE, CONFIG1, CFG1_IL_SHIFT, 3
+
+	/* Skip ahead if No I$ */
+	li	TEMP, 2
+	beqz	LINE_SIZE, $Ldone_icache
+	sllv	LINE_SIZE, TEMP, LINE_SIZE	  /* Now have true I$ line size in bytes */
+
+	ext	SET_SIZE, CONFIG1, CFG1_IS_SHIFT, 3
+	addiu	SET_SIZE, SET_SIZE, 1		  /* Rotate to account for 7 == 32 sets */
+	andi	SET_SIZE, SET_SIZE, 7		  /* Mask down to 3 bits */
+	li	TEMP,	32
+	sllv	SET_SIZE, TEMP, SET_SIZE	  /* I$ Sets per way */
+
+	/* Config1.IA == I$ Assoc - 1 */
+	ext	ASSOC, CONFIG1, CFG1_IA_SHIFT, 3
+	addiu	ASSOC, ASSOC, 1
+
+	mul	SET_SIZE, SET_SIZE, ASSOC	  /* Total number of sets */
+	mul	TOTAL_BYTES, SET_SIZE, LINE_SIZE  /* Total number of bytes */
+
+	li	CURR_ADDR2, 0x80000000		  /* Get a KSeg0 address for cacheops */
+	subu	CURR_ADDR2, CURR_ADDR2, LINE_SIZE /* Pre-bias the addresses as the loop */
+	subu	CURR_ADDR1, CURR_ADDR2, LINE_SIZE /* increments them first */
+
+	addu	END_ADDR, CURR_ADDR1, TOTAL_BYTES /* END_ADDR is last line to invalidate */
+	sll	LINE_SIZE, LINE_SIZE, 1		  /* Double line size as we process two */
+						  /* per loop */
+
+	/* Clear TagLo/TagHi registers */
+	mtc0	zero, C0_TAGLO
+	mtc0	zero, C0_TAGHI
+
+$Lnext_icache_tag:
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+	addu	CURR_ADDR1, LINE_SIZE
+	addu	CURR_ADDR2, LINE_SIZE
+	cache	Index_Store_Tag_I, 0(CURR_ADDR1)
+	cache	Index_Store_Tag_I, 0(CURR_ADDR2)
+	bne	CURR_ADDR1, END_ADDR, $Lnext_icache_tag
+
+$Ldone_icache:
+	jr	ra
+END(__init_icache)
+
+/*
+ * init_dcache invalidates all data cache entries
+ */
+
+LEAF(__init_dcache)
+	mfc0	CONFIG1, C0_CONFIG1
+	ext	LINE_SIZE, CONFIG1, CFG1_DL_SHIFT, 3
+
+	/* Skip ahead if No D$ */
+	li	TEMP, 2
+	beqz	LINE_SIZE, $Ldone_dcache
+
+	sllv	LINE_SIZE, TEMP, LINE_SIZE	  /* Now have true D$ line size in bytes */
+
+	ext	SET_SIZE, CONFIG1, CFG1_DS_SHIFT, 3
+	addiu	SET_SIZE, SET_SIZE, 1		  /* Rotate to account for 7 == 32 sets */
+	andi	SET_SIZE, SET_SIZE, 7		  /* Mask down to 3 bits */
+	li	TEMP, 32
+	sllv	SET_SIZE, TEMP, SET_SIZE	  /* D$ Sets per way */
+
+	/* Config1.DA == D$ Assoc - 1 */
+	ext	ASSOC, CONFIG1, CFG1_DA_SHIFT, 3
+	addiu	ASSOC, 1
+
+	mul	SET_SIZE, SET_SIZE, ASSOC	  /* Get total number of sets */
+	mul	TOTAL_BYTES, SET_SIZE, LINE_SIZE  /* Total number of bytes */
+
+	li	CURR_ADDR2, 0x80000000		  /* Get a KSeg0 address for cacheops */
+	subu	CURR_ADDR2, CURR_ADDR2, LINE_SIZE /* Pre-bias the addresses as the loop */
+	subu	CURR_ADDR1, CURR_ADDR2, LINE_SIZE /* increments them first */
+
+	addu	END_ADDR, CURR_ADDR1, TOTAL_BYTES /* END_ADDR is last line to invalidate */
+	sll	LINE_SIZE, LINE_SIZE, 1		  /* Double line size as we process two */
+						  /* per loop */
+
+	/* Clear TagLo/TagHi registers */
+	mtc0	zero, C0_TAGLO
+	mtc0	zero, C0_TAGHI
+	mtc0	zero, C0_TAGLO, 2
+	mtc0	zero, C0_TAGHI, 2
+
+$Lnext_dcache_tag:
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+	addu	CURR_ADDR1, LINE_SIZE
+	addu	CURR_ADDR2, LINE_SIZE
+	cache	Index_Store_Tag_D, 0(CURR_ADDR1)
+	cache	Index_Store_Tag_D, 0(CURR_ADDR2)
+	bne	CURR_ADDR1, END_ADDR, $Lnext_dcache_tag
+
+$Ldone_dcache:
+	jr	ra
+
+END(__init_dcache)
+
+/*
+ * __change_k0_cca essentially turns the cache on
+ */
+
+LEAF(__change_k0_cca)
+	/*
+	 * NOTE! This code must be executed in KSEG1 (not KSEG0 uncached)
+	 * Set CCA for kseg0 to cacheable
+	 */
+	mfc0	CONFIG, C0_CONFIG
+	li	TEMP, CFG_C_NONCOHERENT
+
+$Lset_kseg0_cca:
+	ins	CONFIG, TEMP, 0, 3
+	mtc0	CONFIG, C0_CONFIG
+	MIPS_JRHB (ra)
+
+END(__change_k0_cca)
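
The Config1 decoding shared by the two loops above, written out in C (a
sketch; config1 is the raw register value, the CFG1_* constants come
from <mips/cpu.h>, and the D-cache DL/DS/DA fields decode identically):

    uint32_t il = (config1 >> CFG1_IL_SHIFT) & 0x7;
    if (il == 0)
        return;                                  /* no I-cache present */
    uint32_t line_size    = 2u << il;            /* bytes per line */
    uint32_t sets_per_way = 32u << ((((config1 >> CFG1_IS_SHIFT) & 0x7) + 1u) & 0x7);
    uint32_t assoc        = ((config1 >> CFG1_IA_SHIFT) & 0x7) + 1u;
    uint32_t total_bytes  = sets_per_way * assoc * line_size;
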
diff --git a/libgloss/mips/boot/init_caches_predef.S b/libgloss/mips/boot/init_caches_predef.S
new file mode 100644
index 000000000..b5efae57a
--- /dev/null
+++ b/libgloss/mips/boot/init_caches_predef.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+/*
+ * Depending on the range of the displacement field of the CACHE instruction
+ * we can do multiple cacheops per iteration.  With a cache present there
+ * is a guarantee of 32 lines minimum so a power of 2 less than or equal
+ * to 32 means there is no remainder after the loop.
+ * The maximum number of lines per iteration is the range of the CACHE
+ * displacement divided by the line_size.  We cap this at 8 as a sensible
+ * bound.
+ */
+
+#if __mips_isa_rev < 6
+/* MicroMIPS Release 3 has a 12-bit displacement for CACHE */
+# define ILINES_PER_ITER 8
+# define DLINES_PER_ITER 8
+#else
+/* MIPS Release 6 has a 9-bit signed displacement for CACHE */
+#if ILINE_SIZE == 128
+# define ILINES_PER_ITER 4 /* Requires both positive and negative disp */
+#else
+# define ILINES_PER_ITER 8
+#endif
+#if DLINE_SIZE == 128
+# define DLINES_PER_ITER 4 /* Requires both positive and negative disp */
+#else
+# define DLINES_PER_ITER 8
+#endif
+#endif
+
+/*
+ * Start off pointing to one block below where we want to invalidate the cache
+ * as the pointer is moved on at the start of the loop. Also offset the start
+ * address for each set of cache lines so that the positive and negative
+ * displacements from the CACHE ops can be used.
+ */
+
+#define ICACHE_START (0x80000000 - (ILINE_SIZE * ILINES_PER_ITER / 2))
+#define ICACHE_END (0x80000000 + ITOTAL_BYTES - (ILINE_SIZE * ILINES_PER_ITER / 2))
+#define DCACHE_START (0x80000000 - (DLINE_SIZE * DLINES_PER_ITER / 2))
+#define DCACHE_END (0x80000000 + DTOTAL_BYTES - (DLINE_SIZE * DLINES_PER_ITER / 2))
+
+#define CURRENT_ADDR  a0
+#define END_ADDR      a1
+#define CONFIG	      a2
+#define TEMP	      a3
+
+	.set    noat
+
+/*
+ * init_icache invalidates all instruction cache entries
+ */
+#if defined(ILINE_ENC) && ILINE_ENC != 0
+LEAF(__init_icache)
+	/* Use KSEG0 base address */
+	li	CURRENT_ADDR, ICACHE_START
+	/* Get the address of the last batch of lines */
+	li	END_ADDR, ICACHE_END
+
+	/* Clear TagLo/TagHi registers */
+	mtc0	zero, C0_TAGLO
+	mtc0	zero, C0_TAGHI
+
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+$Lnext_icache_tag:
+	addu	CURRENT_ADDR, (ILINE_SIZE * ILINES_PER_ITER)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*-2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*-1)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*0)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*1)(CURRENT_ADDR)
+#if ILINES_PER_ITER == 8
+	cache	Index_Store_Tag_I, (ILINE_SIZE*-4)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*-3)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_I, (ILINE_SIZE*3)(CURRENT_ADDR)
+#endif
+	bne	CURRENT_ADDR, END_ADDR, $Lnext_icache_tag
+
+$Ldone_icache:
+	jr	ra
+END(__init_icache)
+
+#endif // ILINE_ENC != 0
+
+/*
+ * init_dcache invalidates all data cache entries
+ */
+
+#if defined(DLINE_ENC) && DLINE_ENC != 0
+LEAF(__init_dcache)
+	/* Use KSEG0 base address */
+	li	CURRENT_ADDR, DCACHE_START
+	/* Get the address of the last batch of lines */
+	li	END_ADDR, DCACHE_END
+
+	/* Clear TagLo/TagHi registers */
+	mtc0	zero, C0_TAGLO
+	mtc0	zero, C0_TAGHI
+	mtc0	zero, C0_TAGLO, 2
+	mtc0	zero, C0_TAGHI, 2
+
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+$Lnext_dcache_tag:
+	addu	CURRENT_ADDR, (DLINE_SIZE * DLINES_PER_ITER)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*-2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*-1)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*0)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*1)(CURRENT_ADDR)
+#if DLINES_PER_ITER == 8
+	cache	Index_Store_Tag_D, (DLINE_SIZE*-4)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*-3)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_D, (DLINE_SIZE*3)(CURRENT_ADDR)
+#endif
+	bne	CURRENT_ADDR, END_ADDR, $Lnext_dcache_tag
+
+$Ldone_dcache:
+	jr	ra
+
+END(__init_dcache)
+#endif // DLINE_ENC != 0
+
+/*
+ * __change_k0_cca essentially turns the cache on
+ */
+
+#if ILINE_ENC != 0 || DLINE_ENC != 0
+LEAF(__change_k0_cca)
+	/*
+	 * NOTE! This code must be executed in KSEG1 (not KSEG0 uncached)
+	 * Set CCA for kseg0 to cacheable
+	 */
+	mfc0	CONFIG, C0_CONFIG
+	li	TEMP, CFG_C_NONCOHERENT
+
+$Lset_kseg0_cca:
+	ins	CONFIG, TEMP, 0, 3
+	mtc0	CONFIG, C0_CONFIG
+	MIPS_JRHB (ra)
+
+END(__change_k0_cca)
+#endif // ILINE_ENC != 0 || DLINE_ENC != 0
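
The ICACHE_START/ICACHE_END constants and the loop above implement the
batching described in the comment block; the same arithmetic in C (a
sketch, with ILINE_SIZE and ITOTAL_BYTES being the predef.h constants):

    /* Bias the pointer by half a batch so the CACHE ops can use
       negative as well as positive displacements from CURRENT_ADDR. */
    uint32_t batch = ILINE_SIZE * ILINES_PER_ITER;
    uint32_t p     = 0x80000000u - batch / 2;         /* ICACHE_START */
    uint32_t end   = p + ITOTAL_BYTES;                /* ICACHE_END */
    do {
        p += batch;
        /* Index_Store_Tag_I at p + k*ILINE_SIZE for k = -N/2 .. N/2-1 */
    } while (p != end);
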
diff --git a/libgloss/mips/boot/init_cm3l2.S b/libgloss/mips/boot/init_cm3l2.S
new file mode 100644
index 000000000..a1402a02b
--- /dev/null
+++ b/libgloss/mips/boot/init_cm3l2.S
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2015-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include <mips/cm3.h>
+
+MIPS_NOMIPS16
+
+#define CM3_BASE	t8
+LEAF(__init_l23cache)
+	/* Check for memory mapped L2 cache config */
+	mfc0	t0, C0_CONFIG3
+	ext	t1, t0, CFG3_M_SHIFT, 1
+	beqz	t1, err
+
+	mfc0	t0, C0_CONFIG4
+	ext	t1, t0, CFG4_M_SHIFT, 1
+	beqz	t1, err
+
+	mfc0	t0, C0_CONFIG5
+	ext	t1, t0, CFG5_L2C_SHIFT, 1
+	bnez	t1, disable_cache
+err:
+	/*
+	 * CM3 L2 code supplied but we have a Config2 L2 setup
+	 * Report a Boot failure through UHI
+	 */
+	li	t9, 23
+	/* Reason - L2 cache config */
+	li	a0, 1
+	/* Trigger the UHI operation */
+	sdbbp	1
+	/* In case a debugger corrects this failure */
+	jr	ra
+
+disable_cache:
+	/* Read CMGCRBase to find CMGCR_BASE_ADDR */
+	PTR_MFC0 t1,C0_CMGCRBASE
+	sll	t1, t1, CMGCR_BASE_ADDR_LSHIFT
+	li	CM3_BASE, 0xb0000000		/* Make it virtual */
+	or	CM3_BASE, CM3_BASE, t1
+
+	/* Disable L2 cache by setting it to bypass mode */
+	PTR_L	t0, GCR_L2_CONFIG(CM3_BASE)
+	li	a2, 1
+	ins	t0, a2, GCR_L2_BYPASS_SHIFT, GCR_L2_BYPASS_BITS
+	PTR_S	t0, GCR_L2_CONFIG(CM3_BASE)
+ret:
+	jr	ra
+END(__init_l23cache)
+
+LEAF(__init_l23cache_cached)
+	/* Read CMGCRBase to find CMGCR_BASE_ADDR */
+	PTR_MFC0 t3,C0_CMGCRBASE
+	sll	t3, t3, CMGCR_BASE_ADDR_LSHIFT
+	li	CM3_BASE, 0xb0000000		/* Make it virtual */
+	or	CM3_BASE, CM3_BASE, t3
+
+	/* Read GCR_L2_CONFIG */
+	PTR_L	t2, GCR_L2_CONFIG(CM3_BASE)
+	ext	t3, t2, GCR_L2_SL_SHIFT, GCR_L2_SL_BITS
+	beqz	t3, done_cm3l2cache		/* No L2 cache */
+
+	/* If hardware cache initialization is supported and finished, skip it. */
+	PTR_L	t1, GCR_L2_RAM_CONFIG(CM3_BASE)
+	ext	t0, t1, GCR_L2_RAM_HCIS_SHIFT, (GCR_L2_RAM_HCID_BITS +\
+						GCR_L2_RAM_HCIS_BITS)
+	li	t1, 3
+	beq	t0, t1, done_cm3l2cache
+
+	li	a2, 2
+	sllv	a1, a2, t3			/* Now have L2 line size */
+
+	ext	a0, t2, GCR_L2_SS_SHIFT, GCR_L2_SS_BITS
+	li	a2, 64
+	sllv	a0, a2, a0			/* Now have L2 sets/way */
+
+	ext	t3, t2, GCR_L2_SA_SHIFT, GCR_L2_SA_BITS
+	addiu	t3, t3, 1			/* Set associativity */
+	mul	a0, t3, a0			/* Get total number of sets */
+
+	sw	zero, GCR_TAG_ADDR(CM3_BASE)
+	sw	zero, (GCR_TAG_ADDR+4)(CM3_BASE)
+	sw	zero, GCR_TAG_STATE(CM3_BASE)
+	sw	zero, (GCR_TAG_STATE+4)(CM3_BASE)
+	sw	zero, GCR_TAG_DATA(CM3_BASE)
+	sw	zero, (GCR_TAG_DATA+4)(CM3_BASE)
+	sw	zero, GCR_TAG_ECC(CM3_BASE)
+	sw	zero, (GCR_TAG_ECC+4)(CM3_BASE)
+	sync
+
+	/* Reg exists, L2 cache does TAG/DATA ECC. */
+	li	t0, 0x8000FFFF
+	and	t2, t2, t0
+	/* LRU is updated on store tag operation */
+	li	t0, (1<<GCR_L2_LRU_WE_SHIFT)
+	or	t2, t2, t0
+	sw	t2, GCR_L2_CONFIG(CM3_BASE)
+	sync
+
+	li	a2, 0x80000000
+
+next_cm3l2cache_tag:
+	cache	Index_Store_Tag_S, 0(a2)
+	addiu	a0, a0, -1
+	addu	a2, a2, a1
+	bnez	a0, next_cm3l2cache_tag
+
+done_cm3l2cache:
+	move	a2, zero
+	PTR_L	t0, GCR_L2_CONFIG(CM3_BASE)
+	ins	t0, a2, GCR_L2_BYPASS_SHIFT, GCR_L2_BYPASS_BITS
+	PTR_S	t0, GCR_L2_CONFIG(CM3_BASE)
+
+	jr	ra
+END(__init_l23cache_cached)
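
The base-address computation at the top of both routines above, in C (a
sketch; read_c0_cmgcrbase() is a hypothetical wrapper for the PTR_MFC0):

    /* CM GCR block: physical base from CP0 CMGCRBase, accessed through
       an uncached mapping, as in the "Make it virtual" step above. */
    uint32_t  phys  = read_c0_cmgcrbase() << CMGCR_BASE_ADDR_LSHIFT;
    uintptr_t gcr   = 0xB0000000u | phys;
    uint32_t  l2cfg = *(volatile uint32_t *)(gcr + GCR_L2_CONFIG);
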
diff --git a/libgloss/mips/boot/init_cm3l2_predef.S b/libgloss/mips/boot/init_cm3l2_predef.S
new file mode 100644
index 000000000..edaf3968f
--- /dev/null
+++ b/libgloss/mips/boot/init_cm3l2_predef.S
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2015-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include <mips/cm3.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+#define CM3_BASE	a3
+
+# ifndef C0_CMGCRBASE_VALUE
+#  error "Static CM3 cache initialization decisions require C0_CMGCRBASE_VALUE"
+# else
+#  define C0_CMGCRBASE_ADDR ((C0_CMGCRBASE_VALUE << 4) | (0xb << 28))
+#  ifndef GCR_L2_CONFIG_VALUE
+#   error "Static CM3 cache initialization decisions require GCR_L2_CONFIG_VALUE"
+#  endif
+# endif
+
+#undef SLINE_ENC
+#undef SSET_ENC
+#undef SASSOC_ENC
+#undef SLINE_SIZE
+#undef SSET_SIZE
+#undef SASSOC
+
+#define SLINE_ENC    ((GCR_L2_CONFIG_VALUE & GCR_L2_SL_MASK) >> GCRL2_CFG_SL_SHIFT)
+#define SSET_ENC    ((GCR_L2_CONFIG_VALUE & GCR_L2_SS_MASK) >> GCRL2_CFG_SS_SHIFT)
+#define SASSOC_ENC    ((GCR_L2_CONFIG_VALUE & GCR_L2_SA_MASK) >> GCRL2_CFG_SA_SHIFT)
+#define SLINE_SIZE   (2 << SLINE_ENC)
+#define SSET_SIZE    (64 << SSET_ENC)
+#define SASSOC	      (SASSOC_ENC + 1)
+
+
+LEAF(__init_l23cache)
+	li	CM3_BASE, C0_CMGCRBASE_ADDR
+	/* Disable L2 cache */
+	PTR_L	t0, GCR_L2_CONFIG(CM3_BASE)
+	li	a2, 1
+	ins	t0, a2, GCR_L2_BYPASS_SHIFT, GCR_L2_BYPASS_BITS
+	PTR_S	t0, GCR_L2_CONFIG(CM3_BASE)
+
+	jr	ra
+END(__init_l23cache)
+
+LEAF(__init_l23cache_cached)
+	li	CM3_BASE, C0_CMGCRBASE_ADDR
+
+#if SLINE_ENC != 0
+	/* If hardware cache initialization is supported and finished, skip it. */
+	PTR_L	t1, GCR_L2_RAM_CONFIG(CM3_BASE)
+	ext	t0, t1, GCR_L2_RAM_HCIS_SHIFT, (GCR_L2_RAM_HCID_BITS +\
+						GCR_L2_RAM_HCIS_BITS)
+	li	t1, 3
+	beq	t0, t1, done_cm3l2cache
+
+	/* Compute L2 cache size */
+	li	a1, SLINE_SIZE
+	li	a0, SSET_SIZE * SASSOC
+
+	sw	zero, GCR_TAG_ADDR(CM3_BASE)
+	sw	zero, (GCR_TAG_ADDR+4)(CM3_BASE)
+	sw	zero, GCR_TAG_STATE(CM3_BASE)
+	sw	zero, (GCR_TAG_STATE+4)(CM3_BASE)
+	sw	zero, GCR_TAG_DATA(CM3_BASE)
+	sw	zero, (GCR_TAG_DATA+4)(CM3_BASE)
+	sw	zero, GCR_TAG_ECC(CM3_BASE)
+	sw	zero, (GCR_TAG_ECC+4)(CM3_BASE)
+
+	/* Reg exists, L2 cache does TAG/DATA ECC. */
+	li	t1, (GCR_L2_CONFIG_VALUE & 0x8000FFFF)
+	/* LRU is updated on store tag operation */
+	li	t0, (1<<GCR_L2_LRU_WE_SHIFT)
+	or	t1, t1, t0
+	sw	t1, GCR_L2_CONFIG(CM3_BASE)
+	sync
+
+	lui	a2, 0x8000
+next_cm3l2cache_tag:
+	cache	Index_Store_Tag_S, 0(a2)
+	addiu	a0, a0, -1
+	addu	a2, a2, a1
+	bnez	a0, next_cm3l2cache_tag
+#endif /* SLINE_ENC != 0 */
+
+done_cm3l2cache:
+	move	a2, zero
+	PTR_L	t0, GCR_L2_CONFIG(CM3_BASE)
+	ins	t0, a2, GCR_L2_BYPASS_SHIFT, GCR_L2_BYPASS_BITS
+	PTR_S	t0, GCR_L2_CONFIG(CM3_BASE)
+
+	jr	ra
+END(__init_l23cache_cached)
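
Both the generic and predefined routines bracket their work with the L2
bypass bit; the toggle itself amounts to (a sketch; gcr_read() and
gcr_write() are hypothetical 32-bit accessors into the GCR block):

    /* 1 = bypass (L2 effectively disabled), 0 = normal operation. */
    uint32_t cfg = gcr_read(GCR_L2_CONFIG);
    cfg &= ~(((1u << GCR_L2_BYPASS_BITS) - 1) << GCR_L2_BYPASS_SHIFT);
    cfg |= (uint32_t)bypass << GCR_L2_BYPASS_SHIFT;
    gcr_write(GCR_L2_CONFIG, cfg);
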
diff --git a/libgloss/mips/boot/init_cp0.S b/libgloss/mips/boot/init_cp0.S
new file mode 100644
index 000000000..66f24f063
--- /dev/null
+++ b/libgloss/mips/boot/init_cp0.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+
+MIPS_NOMIPS16
+
+      .set  noat
+
+LEAF(__init_cp0)
+
+	/* Initialize Status */
+	li	t1, SR_ERL | SR_BEV
+	mtc0	t1, C0_STATUS
+
+	/* Initialize Watch registers if implemented */
+	mfc0	t0, C0_CONFIG1
+	ext	t1, t0, CFG1_WR_SHIFT, 1
+	beqz	t1, $Ldone_wr
+	li	t1, WATCHHI_I | WATCHHI_R | WATCHHI_W
+
+	/* Clear Watch Status bits and disable watch exceptions */
+	mtc0	t1, C0_WATCHHI
+	mfc0	t0, C0_WATCHHI
+	mtc0	zero, C0_WATCHLO
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 1
+	mfc0	t0, C0_WATCHHI, 1
+	mtc0	zero, C0_WATCHLO, 1
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 2
+	mfc0	t0, C0_WATCHHI, 2
+	mtc0	zero, C0_WATCHLO, 2
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 3
+	mfc0	t0, C0_WATCHHI, 3
+	mtc0	zero, C0_WATCHLO, 3
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 4
+	mfc0	t0, C0_WATCHHI, 4
+	mtc0	zero, C0_WATCHLO, 4
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 5
+	mfc0	t0, C0_WATCHHI, 5
+	mtc0	zero, C0_WATCHLO, 5
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 6
+	mfc0	t0, C0_WATCHHI, 6
+	mtc0	zero, C0_WATCHLO, 6
+	bgez	t0, $Ldone_wr
+
+	mtc0	t1, C0_WATCHHI, 7
+	mfc0	t0, C0_WATCHHI, 7
+	mtc0	zero, C0_WATCHLO, 7
+
+$Ldone_wr:
+	/*
+	 * Clear WP bit to avoid watch exception upon user code entry, IV,
+	 * and software interrupts.
+	 */
+	mtc0	zero, C0_CAUSE
+
+	/*
+	 * Clear timer interrupt. (Count was cleared at the reset vector to
+	 * allow timing boot.)
+	 */
+	mtc0	zero, C0_COMPARE
+
+	jr	ra
+END(__init_cp0)
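
The unrolled WatchHi/WatchLo sequence above is equivalent to this loop
(a sketch; read_c0_watchhi(n), write_c0_watchhi(n, v) and
write_c0_watchlo(n, v) are hypothetical wrappers for the numbered
mfc0/mtc0 selects):

    /* Clear the watch status bits (I/R/W) and disable each watchpoint;
       WatchHi.M (bit 31) says another watch register pair follows. */
    if (config1 & CFG1_WR) {
        for (int n = 0; n < 8; n++) {
            write_c0_watchhi(n, WATCHHI_I | WATCHHI_R | WATCHHI_W);
            uint32_t hi = read_c0_watchhi(n);
            write_c0_watchlo(n, 0);
            if ((int32_t)hi >= 0)    /* M clear: this was the last pair */
                break;
        }
    }
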
diff --git a/libgloss/mips/boot/init_cp0_predef.S b/libgloss/mips/boot/init_cp0_predef.S
new file mode 100644
index 000000000..7c1397123
--- /dev/null
+++ b/libgloss/mips/boot/init_cp0_predef.S
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+	.set  noat
+
+LEAF(__init_cp0)
+
+	/* Initialize Status */
+	li	t1, SR_BEV | SR_ERL
+	mtc0	t1, C0_STATUS
+
+#if (C0_CONFIG1_VALUE & CFG1_WR) != 0
+	li	t1, WATCHHI_I | WATCHHI_R | WATCHHI_W
+
+	/* Clear Watch Status bits and disable watch exceptions */
+	mtc0	t1, C0_WATCHHI
+	mtc0	zero, C0_WATCHLO
+
+# ifndef C0_WATCHHI_VALUE
+#  error "C0_WATCHHI_VALUE is required"
+# endif
+
+# if (C0_WATCHHI_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 1
+	mtc0	zero, C0_WATCHLO, 1
+
+#  ifndef C0_WATCHHI1_VALUE
+#   error "C0_WATCHHI1_VALUE is required"
+#  endif
+
+#  if (C0_WATCHHI1_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 2
+	mtc0	zero, C0_WATCHLO, 2
+
+#   ifndef C0_WATCHHI2_VALUE
+#    error "C0_WATCHHI2_VALUE is required"
+#   endif
+
+#   if (C0_WATCHHI2_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 3
+	mtc0	zero, C0_WATCHLO, 3
+
+#    ifndef C0_WATCHHI3_VALUE
+#     error "C0_WATCHHI3_VALUE is required"
+#    endif
+
+#    if (C0_WATCHHI3_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 4
+	mtc0	zero, C0_WATCHLO, 4
+
+#     ifndef C0_WATCHHI4_VALUE
+#      error "C0_WATCHHI4_VALUE is required"
+#     endif
+
+#     if (C0_WATCHHI4_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 5
+	mtc0	zero, C0_WATCHLO, 5
+
+#      ifndef C0_WATCHHI5_VALUE
+#       error "C0_WATCHHI5_VALUE is required"
+#      endif
+
+#      if (C0_WATCHHI5_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 6
+	mtc0	zero, C0_WATCHLO, 6
+
+#       ifndef C0_WATCHHI6_VALUE
+#        error "C0_WATCHHI6_VALUE is required"
+#       endif
+
+#       if (C0_WATCHHI6_VALUE & WATCHHI_M) != 0
+	mtc0	t1, C0_WATCHHI, 7
+	mtc0	zero, C0_WATCHLO, 7
+
+#       endif
+#      endif
+#     endif
+#    endif
+#   endif
+#  endif
+# endif
+#endif
+
+	/*
+	 * Clear WP bit to avoid watch exception upon user code entry, IV, and
+	 * software interrupts.
+	 */
+	mtc0	zero, C0_CAUSE
+
+	/*
+	 * Clear timer interrupt. (Count was cleared at the reset vector to
+	 * allow timing boot.)
+	 */
+	mtc0	zero, C0_COMPARE
+
+	jr	ra
+END(__init_cp0)
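
Because every WatchHi value is known at build time, the nested chain
above needs no run-time read-back of WatchHi.M: it emits exactly one
write pair per declared watch register, and the #error directives catch
an incomplete predefined configuration.  A sketch of the expansion for
a hypothetical config declaring two pairs (C0_WATCHHI_VALUE with M set,
C0_WATCHHI1_VALUE with M clear; wrappers as in the previous sketch):

    write_c0_watchhi(0, WATCHHI_I | WATCHHI_R | WATCHHI_W);
    write_c0_watchlo(0, 0);
    write_c0_watchhi(1, WATCHHI_I | WATCHHI_R | WATCHHI_W);
    write_c0_watchlo(1, 0);
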
diff --git a/libgloss/mips/boot/init_l23caches.S b/libgloss/mips/boot/init_l23caches.S
new file mode 100644
index 000000000..277dacf0b
--- /dev/null
+++ b/libgloss/mips/boot/init_l23caches.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+
+MIPS_NOMIPS16
+	.set	noat
+
+LEAF(__init_l23cache)
+	/* L2 Cache initialization routine */
+	/* Check for memory mapped L2 cache config */
+	mfc0	a3, C0_CONFIG3
+	ext	a3, a3, CFG3_M_SHIFT, 1
+	beqz	a3, l23_init
+	mfc0	a3, C0_CONFIG4
+	ext	a3, a3, CFG4_M_SHIFT, 1
+	beqz	a3, l23_init
+	mfc0	a3, C0_CONFIG5
+	ext	a3, a3, CFG5_L2C_SHIFT, 1
+	beqz	a3, l23_init
+
+	/*
+	 * No CM3 code supplied but we have a memory mapped L2 configuration
+	 * Report a Boot failure through UHI
+	 */
+	li	t9, 23
+	/* Reason - L2 cache config */
+	li	a0, 1
+	/* Trigger the UHI operation */
+	sdbbp	1
+	/* In case a debugger corrects this failure */
+	b	done_l3cache
+
+l23_init:
+	/* Check L2 cache size */
+	mfc0	t0, C0_CONFIG2
+
+	/* Isolate L2$ Line Size */
+	ext	t1, t0, CFG2_SL_SHIFT, CFG2_SL_BITS
+
+	/* Skip ahead if No L2$ */
+	beqz	t1, done_l2cache
+
+	li	a2, 2
+	sllv	a1, a2, t1		/* Decode L2$ line size in bytes */
+
+	/* Isolate L2$ Sets per Way */
+	ext	a0, t0, CFG2_SS_SHIFT, CFG2_SS_BITS
+	li	a2, 64
+	sllv	a0, a2, a0		/* L2$ Sets per way */
+
+	/* Isolate L2$ Associativity */
+	ext	t1, t0, CFG2_SA_SHIFT, CFG2_SA_BITS
+	addiu	t1, t1, 1
+
+	mul	a0, a0, t1		/* Get total number of sets */
+
+l2cache_init:
+	li	a2, 0x80000000		/* Get a KSeg0 address for cacheops */
+
+	/* Clear L23TagLo/L23TagHi registers */
+	mtc0    zero, C0_TAGLO, 4
+
+	/*
+	 * L2$ Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+next_L2cache_tag:
+	cache	Index_Store_Tag_S, 0(a2)
+	addiu	a0, a0, -1
+	addu	a2, a2, a1
+	bnez	a0, next_L2cache_tag
+
+done_l2cache:
+	/* Isolate L3$ Line Size */
+	ext	t1, t0, CFG2_TL_SHIFT, CFG2_TL_BITS
+
+	/* Skip ahead if No L3$ */
+	beqz	t1, done_l3cache
+
+	li	a2, 2
+	sllv	a1, a2, t1		/* Decode L3$ line size in bytes */
+
+	/* Isolate L3$ Sets per Way */
+	ext	a0, t0, CFG2_TS_SHIFT, CFG2_TS_BITS
+	li	a2, 64
+	sllv	a0, a2, a0		/* Decode L3 Sets per way */
+
+	/* Isolate L3$ Associativity */
+	ext	t1, t0, CFG2_TA_SHIFT, CFG2_TA_BITS
+	addiu	t1, t1, 1		/* Decode L3 associativity (number of ways) */
+	mul	a0, a0, t1		/* Compute total number of sets */
+
+l3cache_init:
+	li	a2, 0x80000000	   	/* Get a KSeg0 address for cacheops */
+
+	/* Clear L23Tag register */
+	mtc0    zero, C0_TAGLO, 4
+
+	/*
+	 * L3$ Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+next_L3cache_tag:
+	cache	Index_Store_Tag_T, 0(a2)
+	addiu	a0, a0, -1
+	addu	a2, a2, a1
+	bnez	a0, next_L3cache_tag
+
+done_l3cache:
+	jr	ra
+END(__init_l23cache)
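
In C, the L2 path above does, in effect (a sketch; cache_op() is a
hypothetical wrapper for the CACHE instruction, and the L3 path repeats
this with the TL/TS/TA fields and Index_Store_Tag_T):

    uint32_t sl = (config2 >> CFG2_SL_SHIFT) & ((1u << CFG2_SL_BITS) - 1);
    if (sl != 0) {                                 /* 0 => no L2 cache */
        uint32_t line = 2u << sl;
        uint32_t sets = 64u << ((config2 >> CFG2_SS_SHIFT) & ((1u << CFG2_SS_BITS) - 1));
        uint32_t ways = ((config2 >> CFG2_SA_SHIFT) & ((1u << CFG2_SA_BITS) - 1)) + 1u;
        uint32_t addr = 0x80000000u;               /* any KSeg0 address */
        for (uint32_t n = sets * ways; n != 0; n--, addr += line)
            cache_op(Index_Store_Tag_S, addr);     /* tag zeroed via L23TagLo */
    }
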
diff --git a/libgloss/mips/boot/init_l23caches_predef.S b/libgloss/mips/boot/init_l23caches_predef.S
new file mode 100644
index 000000000..36892a42e
--- /dev/null
+++ b/libgloss/mips/boot/init_l23caches_predef.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+/*
+ * Depending on the range of the displacement field of the CACHE instruction
+ * we can do multiple cacheops per iteration.  With a cache present there
+ * is a guarantee of 32 lines minimum so a power of 2 less than or equal
+ * to 32 means there is no remainder after the loop.
+ * The maximum number of lines per iteration is the range of the CACHE
+ * displacement divided by the line_size.  We cap this at 8 as a sensible
+ * bound.
+ */
+
+#if __mips_isa_rev < 6
+/* MicroMIPS Release 3 has a 12-bit displacement for CACHE */
+# define SLINES_PER_ITER 8
+# define TLINES_PER_ITER 8
+#else
+/* MIPS Release 6 has a 9-bit signed displacement for CACHE */
+#if SLINE_SIZE == 128
+# define SLINES_PER_ITER 4 /* Requires both positive and negative disp */
+#else
+# define SLINES_PER_ITER 8
+#endif
+#if TLINE_SIZE == 128
+# define TLINES_PER_ITER 4 /* Requires both positive and negative disp */
+#else
+# define TLINES_PER_ITER 8
+#endif
+#endif
+
+#ifdef MEM_MAPPED_L2C
+# error MEM_MAPPED_L2C used with Config L2 code
+#endif
+
+/*
+ * Start off pointing to one block below where we want to invalidate the cache
+ * as the pointer is moved on at the start of the loop. Also offset the start
+ * address for each set of cache lines so that the positive and negative
+ * displacements from the CACHE ops can be used.
+ */
+
+#define SCACHE_START (0x80000000 - (SLINE_SIZE * SLINES_PER_ITER / 2))
+#define SCACHE_END (0x80000000 + STOTAL_BYTES - (SLINE_SIZE * SLINES_PER_ITER / 2))
+#define TCACHE_START (0x80000000 - (TLINE_SIZE * TLINES_PER_ITER / 2))
+#define TCACHE_END (0x80000000 + TTOTAL_BYTES - (TLINE_SIZE * TLINES_PER_ITER / 2))
+
+#define CURRENT_ADDR  a0
+#define END_ADDR      a1
+#define CONFIG	      a2
+#define TEMP	      a3
+
+	.set	noat
+
+/*
+ * __init_l23cache invalidates all secondary/tertiary data cache entries
+ */
+
+#if defined(SLINE_ENC) && SLINE_ENC != 0
+LEAF(__init_l23cache)
+	/* Use KSEG0 base address */
+	li    CURRENT_ADDR, SCACHE_START
+	/* Get the address of the last batch of lines */
+	li    END_ADDR, SCACHE_END
+
+	/* Clear TagLo/TagHi registers */
+	mtc0    zero, C0_TAGLO, 4
+
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+$Lnext_scache_tag:
+	addu	CURRENT_ADDR, (SLINE_SIZE * SLINES_PER_ITER)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*-2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*-1)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*0)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*1)(CURRENT_ADDR)
+#if SLINES_PER_ITER == 8
+	cache	Index_Store_Tag_S, (SLINE_SIZE*-4)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*-3)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_S, (SLINE_SIZE*3)(CURRENT_ADDR)
+#endif
+	bne	CURRENT_ADDR, END_ADDR, $Lnext_scache_tag
+
+$Ldone_scache:
+
+#if defined(TLINE_ENC) && TLINE_ENC != 0
+
+	/* Use KSEG0 base address */
+	li	CURRENT_ADDR, TCACHE_START
+	/* Get the address of the last batch of lines */
+	li	END_ADDR, TCACHE_END
+
+	/* Clear TagLo/TagHi registers */
+	mtc0	zero, C0_TAGLO, 4
+
+	/*
+	 * Index Store Tag Cache Op will invalidate the tag entry, clear
+	 * the lock bit, and clear the LRF bit
+	 */
+$Lnext_tcache_tag:
+	addu	CURRENT_ADDR, (TLINE_SIZE * TLINES_PER_ITER)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*-2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*-1)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*0)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*1)(CURRENT_ADDR)
+#if TLINES_PER_ITER == 8
+	cache	Index_Store_Tag_T, (TLINE_SIZE*-4)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*-3)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*2)(CURRENT_ADDR)
+	cache	Index_Store_Tag_T, (TLINE_SIZE*3)(CURRENT_ADDR)
+#endif
+	bne	CURRENT_ADDR, END_ADDR, $Lnext_tcache_tag
+
+$Ldone_tcache:
+
+#endif // TLINE_ENC != 0
+	jr	ra
+END(__init_l23cache)
+
+LEAF(__init_l23cache_cached)
+	jr	ra
+END(__init_l23cache_cached)
+
+#endif // SLINE_ENC != 0
diff --git a/libgloss/mips/boot/init_tlb.S b/libgloss/mips/boot/init_tlb.S
new file mode 100644
index 000000000..1eb300446
--- /dev/null
+++ b/libgloss/mips/boot/init_tlb.S
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2015-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+
+MIPS_NOMIPS16
+
+/*
+ * int, int __tlb_size();
+ *
+ * Return number of entries in TLB.
+ * Entries in va0, number of sets in va1.
+ * Must not use registers t8 or a3
+ *
+ */
+SLEAF(__tlb_size)
+	/* first see if we've got a TLB */
+	mfc0	t0, C0_CONFIG
+	mfc0	t1, C0_CONFIG1
+	move	va0, zero
+
+	ext	t0, t0, CFG0_MT_SHIFT, CFG0_MT_BITS
+	/* No MMU test, 0 entries */
+	beqz	t0, 1f
+
+	/* Fixed Address Translation, 0 entries */
+	li	t2, (CFG0_MT_FIXED >> CFG0_MT_SHIFT)
+	beq	t0, t2, 1f
+
+	/* Block Address Translator, 0 entries */
+	li	t2, (CFG0_MT_BAT >> CFG0_MT_SHIFT)
+	beq	t0, t2, 1f
+
+	/* (D)TLB or not? */
+	andi	t2, t0, (CFG0_MT_TLB | CFG0_MT_DUAL) >> CFG0_MT_SHIFT
+	beqz	t2, 1f
+
+	/*
+	 * As per PRA, field holds No. of entries -1
+	 * Standard TLBs and Dual TLBs have extension fields.
+	 */
+	ext	va0, t1, CFG1_MMUS_SHIFT, CFG1_MMUS_BITS
+	addiu	va0, va0, 1
+
+	mfc0	t1, C0_CONFIG3
+	ext	t1, t1, CFG3_M_SHIFT, 1
+	beqz	t1, 1f
+
+	mfc0	t1, C0_CONFIG4
+#if __mips_isa_rev < 6
+	ext	t3, t1, CFG4_MMUED_SHIFT, CFG4_MMUED_BITS
+
+	li	t2, (CFG4_MMUED_FTLBVEXT >> CFG4_MMUED_SHIFT)
+	beq	t3, t2, 2f			/* FTLB + VTLBExt */
+
+	li	t2, (CFG4_MMUED_SIZEEXT >> CFG4_MMUED_SHIFT)
+	beq	t3, t2, 3f			/* SizeExt for VTLBEXT */
+
+	li	t2, (CFG4_MMUED_FTLB >> CFG4_MMUED_SHIFT)
+	beq	t3, t2, 4f			/* FTLB Size */
+
+	/* No extension */
+	jr	ra
+
+3:
+	ext	t3, t1, CFG4_MMUSE_SHIFT, CFG4_MMUSE_BITS
+	sll	t2, t3, CFG1_MMUS_BITS
+	addu	va0, va0, t2
+	jr	ra
+#endif /* __mips_isa_rev < 6 */
+2:
+	ext	t2, t1, CFG4_VTLBSEXT_SHIFT, CFG4_VTLBSEXT_BITS
+	sll	t2, t2, CFG1_MMUS_BITS
+	addu	va0, va0, t2
+4:
+	/* Skip FTLB size calc if Config MT != 4 */
+	li	t3, (CFG0_MT_DUAL >> CFG0_MT_SHIFT)
+	bne	t3, t0, 1f
+
+	/* Ways */
+	li	t2, 2
+	ext	t3, t1, CFG4_FTLBW_SHIFT, CFG4_FTLBW_BITS
+	addu	t2, t2, t3
+
+	/* Sets per way */
+	ext	t3, t1, CFG4_FTLBS_SHIFT, CFG4_FTLBS_BITS
+	li	va1, 1
+	sllv	va1, va1, t3
+
+	/* Total sets */
+	sllv	t2, t2, t3
+	addu	va0, va0, t2
+
+1:	jr	ra
+SEND(__tlb_size)
+
+/*
+ * void __tlbinvalall()
+ *
+ * Invalidate the TLB.
+ * Must not use register a3
+ */
+SLEAF(__tlbinvalall)
+
+	mfc0	t0, C0_CONFIG
+	and	t2, t0, CFG0_MT_MASK
+	beqz	t2, $Lexit		/* Config[MT] test, return if no TLB */
+
+	li	t1, CFG0_MT_BAT
+	beq	t1, t2, $Lexit		/* return as there is a BAT */
+
+	li	t1, CFG0_MT_FIXED	/* return as there is a FMT */
+	beq	t1, t2, $Lexit
+
+	PTR_MTC0 zero, C0_ENTRYLO0
+	PTR_MTC0 zero, C0_ENTRYLO1
+	PTR_MTC0 zero, C0_PAGEMASK
+
+	/* Fetch size & number of sets in va0, va1 */
+	move	t8, ra
+	jal	__tlb_size
+	move	ra, t8
+
+	/* If Config4 does not exist then use old method for invalidation */
+	mfc0	t1, C0_CONFIG3
+	ext	t1, t1, CFG3_M_SHIFT, 1
+	beqz	t1, $Llegacy_init
+
+	/* If Config4[IE] = 0, use old method for invalidation */
+	mfc0	t9, C0_CONFIG4
+	ext     t2, t9, CFG4_IE_SHIFT, CFG4_IE_BITS
+	beqz	t2, $Llegacy_init
+
+	/* If Config4[IE] = 1, EHINV loop */
+	li	t1, (CFG4_IE_EHINV >> CFG4_IE_SHIFT)
+	beq	t1, t2, $Lehinv_init
+
+	/*
+	 * If Config4[IE] = 2, tlbinvf loop. Handles Config[MT] being either
+	 * 1 or 4.
+	 */
+	li	t1, (CFG4_IE_INV >> CFG4_IE_SHIFT)
+	beq	t1, t2, $Ltlbinvf_init
+
+	/* TLB walk done by hardware, Config4[IE] = 3 */
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+	b	$Lexit
+
+$Ltlbinvf_init:
+	/*
+	 * TLB walk done by software, Config4[IE] = 2, Config[MT] = 4 or 1
+	 *
+	 * one TLBINVF is executed with an index in VTLB range to
+	 * invalidate all VTLB entries.
+	 *
+	 * For Dual TLBs additionally, one TLBINVF is executed per FTLB set.
+	 */
+
+	/* Flush the VTLB */
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+
+	/*
+	 * For JTLB MMUs (Config[MT] = 1) only 1 tlbinvf is required
+	 * early out in that case.
+	 */
+	mfc0	t0, C0_CONFIG
+	ext	t3, t0, CFG0_MT_SHIFT, CFG0_MT_BITS
+	li	t1, (CFG0_MT_TLB >> CFG0_MT_SHIFT)
+	beq	t1, t3, $Lexit
+
+	/*
+	 * va0 contains number of TLB entries
+	 * va1 contains number of sets per way
+	 */
+	lui	t9, %hi(__tlb_stride_length)	/* Fetch the tlb stride for */
+	addiu	t9, %lo(__tlb_stride_length)	/* stepping through FTLB sets */
+	mul	va1, va1, t9
+	subu	t2, va0, va1			/* End pointer */
+
+1:	subu	va0, va0, t9
+	mtc0	va0, C0_INDEX
+	ehb					/* mtc0, hazard on tlbinvf */
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+	bne	va0, t2, 1b
+
+	b	$Lexit
+
+$Lehinv_init:
+	/*
+	 * Config4[IE] = 1. EHINV supported, but not tlbinvf.
+	 *
+	 * Invalidate the TLB for R3 onwards by loading EHINV and writing to all
+	 * tlb entries.
+	 */
+	move	t0, zero
+	li	t1, C0_ENTRYHI_EHINV_MASK
+	mtc0	t1, C0_ENTRYHI
+1:
+	mtc0	t0, C0_INDEX
+	ehb					/* mtc0, hazard on tlbwi */
+
+	tlbwi
+	addiu	t0, t0, 1
+	bne	va0, t0, 1b
+
+	b	$Lexit
+
+$Llegacy_init:
+	/*
+	 * Invalidate the TLB for R1 onwards by loading
+	 * 0x(FFFFFFFF)KSEG0_BASE into EntryHi and writing it into index 0
+	 * incrementing by a pagesize, writing into index 1, etc.
+	 */
+
+	/*
+	 * If large physical addressing is enabled, load 0xFFFFFFFF
+	 * into the top half of EntryHi.
+	 */
+	move	t0, zero		/* t0 == 0 if XPA disabled */
+	mfc0	t9, C0_CONFIG3		/* or not present */
+	and	t9, t9, CFG3_LPA
+	beqz	t9, $Lno_xpa
+
+	mfc0	t9, C0_PAGEGRAIN
+	ext	t9, t9, PAGEGRAIN_ELPA_SHIFT, PAGEGRAIN_ELPA_BITS
+	bnez	t9, $Lno_xpa
+
+	li	t0, -1			/* t0 == 0xFFFFFFFF if XPA is used */
+$Lno_xpa:
+	li	t1, (KSEG0_BASE - (2<<13))
+
+	move	t2, zero
+1:	addiu	t1, t1, (2<<13)
+	PTR_MTC0 t1, C0_ENTRYHI
+
+	beqz	t0, $Lskip_entryhi
+	.set	push
+	.set	xpa
+	mthc0	t0, C0_ENTRYHI		/* Store 0xFFFFFFFF to upper half of EntryHI */
+	.set	pop
+
+$Lskip_entryhi:
+	ehb				/* mtc0, hazard on tlbp */
+
+	tlbp				/* Probe for a match */
+	ehb				/* tlbp, Hazard on mfc0 */
+
+	mfc0	t8, C0_INDEX
+	bgez	t8, 1b			/* Skip this address if it exists */
+
+	mtc0	t2, C0_INDEX
+	ehb				/* mtc0, hazard on tlbwi */
+
+	tlbwi
+	addiu	t2, t2, 1
+	bne	va0, t2, 1b
+
+$Lexit:
+	PTR_MTC0 zero, C0_ENTRYHI	/* Unset EntryHi, upper half is cleared */
+					/* automatically as mtc0 writes zeroes */
+	MIPS_JRHB	(ra)
+SEND(__tlbinvalall)
+
+LEAF(__init_tlb)
+
+	mfc0	t0, C0_CONFIG
+	and	t2, t0, CFG0_MT_MASK
+	beqz	t2, 1f			/* return if no tlb present */
+
+	li	t1, CFG0_MT_BAT
+	beq	t1, t2, 1f		/* return as there is a BAT */
+
+	li	t1, CFG0_MT_FIXED	/* return as there is a FMT */
+	beq	t1, t2, 1f
+
+	lui	t1, %hi(__enable_xpa)	/* Test for XPA usage */
+	ori	t1, %lo(__enable_xpa)
+	beqz	t1, 2f
+
+	mfc0	t0, C0_CONFIG3
+	and	t0, t0, CFG3_LPA
+	bnez	t0, 3f
+
+	/*
+	 * Raise an error because XPA was requested but LPA support is not
+	 * available.
+	 */
+	/* Incorrect config supplied, report a boot failure through UHI */
+	li      t9, 23
+	/* Reason - Predef/requested config incorrect */
+	li      a0, 2
+	/* Trigger the UHI operation */
+	sdbbp   1
+
+3:	li	t1, 1
+	mfc0	t0, C0_PAGEGRAIN
+	ins	t0, t1, PAGEGRAIN_ELPA_SHIFT, PAGEGRAIN_ELPA_BITS
+	mtc0	t0, C0_PAGEGRAIN
+2:
+	move	a3, ra
+	jal	__tlbinvalall
+	move	ra, a3
+
+	mtc0	zero, C0_PAGEMASK
+1:	jr	ra
+END(__init_tlb)
diff --git a/libgloss/mips/boot/init_tlb_predef.S b/libgloss/mips/boot/init_tlb_predef.S
new file mode 100644
index 000000000..935d4b580
--- /dev/null
+++ b/libgloss/mips/boot/init_tlb_predef.S
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2015-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _BOOTCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+LEAF(__init_tlb)
+#if HAVE_LPA && ENABLE_XPA
+	mfc0	t0, C0_PAGEGRAIN
+	li	t1, 1
+	ins	t0, t1, PAGEGRAIN_ELPA_SHIFT, PAGEGRAIN_ELPA_BITS
+	mtc0	t0, C0_PAGEGRAIN
+#endif
+	# Top halves of registers are cleared implicitly with mtc0
+	PTR_MTC0 zero, C0_PAGEMASK
+	PTR_MTC0 zero, C0_ENTRYLO0
+	PTR_MTC0 zero, C0_ENTRYLO1
+
+#if HAVE_HW_TLB_WALK
+	/* TLB walk done by hardware, Config4[IE] = 3 or Config[MT] = 1 */
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+#endif
+
+#if HAVE_SW_TLB_WALK
+	/*
+	 * TLB walk done by software, Config4[IE] = 2, Config[MT] = 4
+	 *
+	 * One TLBINVF is executed with an index in the VTLB range to
+	 * invalidate all VTLB entries.
+	 *
+	 * One additional TLBINVF is executed per FTLB set.
+	 */
+	li	t2, MMU_SIZE			/* Start pointer/finger */
+	li	t8, FTLB_SETS
+	li	t9, %hi(__tlb_stride_length)
+	addiu	t9, %lo(__tlb_stride_length)
+	mul	t8, t8, t9
+	subu	t1, t2, t8			/* End pointer */
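+
+	/*
+	 * Worked example (hypothetical configuration): with MMU_SIZE = 576,
+	 * FTLB_SETS = 128 and __tlb_stride_length = 1, the end pointer is
+	 * 576 - 128 = 448; once the TLBINVF issued via index 0 below has
+	 * flushed the VTLB, the loop issues one TLBINVF per FTLB set at
+	 * indices 575 down to 448.
+	 */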
+
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+
+1:	subu	t2, t2, t9
+	mtc0	t2, C0_INDEX
+	ehb
+	.set	push
+	.set	eva
+	tlbinvf
+	.set	pop
+	bne	t1, t2, 1b
+#endif
+
+#if HAVE_EHINV_WALK
+	li	v0, MMU_SIZE
+	move	v1, zero
+	li	t0, C0_ENTRYHI_EHINV_MASK
+	PTR_MTC0 t0, C0_ENTRYHI
+1:
+	mtc0	v1, C0_INDEX
+	ehb
+
+	tlbwi
+	addiu	v1, v1, 1
+	bne	v0, v1, 1b
+#endif
+
+#if HAVE_NO_INV
+	/*
+	 * Clean invalidate the TLB for R1 onwards by loading
+	 * 0x(FFFFFFFF)KSEG0_BASE into EntryHi, writing it into index 0,
+	 * incrementing EntryHi by a page size, writing into index 1, etc.
+	 */
+	li	v0, MMU_SIZE
+
+	/*
+	 * If large physical addressing is enabled, load 0xFFFFFFFF
+	 * into the top half of EntryHi.
+	 */
+#if HAVE_LPA && ENABLE_XPA
+	li	t0, -1
+#endif
+	li	t1, (KSEG0_BASE - 2<<13)
+
+	move	v1, zero
+1:	addiu	t1, t1, (2<<13)
+	PTR_MTC0 t1, C0_ENTRYHI
+#if HAVE_LPA && ENABLE_XPA
+	.set	push
+	.set	xpa
+	mthc0	t0, C0_ENTRYHI		/* Store 0xFFFFFFFF to upper half of EntryHi */
+	.set	pop
+#endif
+	ehb				/* mt(h)c0, hazard on tlbp */
+
+	tlbp				/* Probe for a match */
+	ehb				/* tlbp, hazard on mfc0 */
+
+	mfc0	t8, C0_INDEX
+	bgez	t8, 1b			/* Skip this address if it exists */
+
+	mtc0	v1, C0_INDEX
+	ehb				/* mtc0, hazard on tlbwi */
+
+	tlbwi
+	addiu	v1, v1, 1
+	bne	v0, v1, 1b
+#endif
+
+	PTR_MTC0 zero, C0_ENTRYHI	/* Unset EntryHi; the top half is cleared too */
+	MIPS_JRHB	(ra)
+END(__init_tlb)
diff --git a/libgloss/mips/boot/predef.h b/libgloss/mips/boot/predef.h
new file mode 100644
index 000000000..9cd25beed
--- /dev/null
+++ b/libgloss/mips/boot/predef.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef C0_CONFIG0_VALUE
+#error "Static TLB initialisation decisions require C0_CONFIG0_VALUE"
+#endif
+
+#ifndef C0_CONFIG1_VALUE
+#error "Static TLB/cache initialisation decisions require C0_CONFIG1_VALUE"
+#endif
+
+#define ILINE_ENC     ((C0_CONFIG1_VALUE & CFG1_IL_MASK) >> CFG1_IL_SHIFT)
+#define ILINE_SIZE    (2 << ILINE_ENC)
+#define ISET_ENC      ((C0_CONFIG1_VALUE & CFG1_IS_MASK) >> CFG1_IS_SHIFT)
+#define ISET_SIZE     (32 << ((ISET_ENC + 1) & 0x7))
+#define IASSOC_ENC    ((C0_CONFIG1_VALUE & CFG1_IA_MASK) >> CFG1_IA_SHIFT)
+#define IASSOC	      (IASSOC_ENC + 1)
+#define ITOTAL_BYTES  (ILINE_SIZE * ISET_SIZE * IASSOC)
+#define DLINE_ENC     ((C0_CONFIG1_VALUE & CFG1_DL_MASK) >> CFG1_DL_SHIFT)
+#define DLINE_SIZE    (2 << DLINE_ENC)
+#define DSET_ENC      ((C0_CONFIG1_VALUE & CFG1_DS_MASK) >> CFG1_DS_SHIFT)
+#define DSET_SIZE     (32 << ((DSET_ENC + 1) & 0x7))
+#define DASSOC_ENC    ((C0_CONFIG1_VALUE & CFG1_DA_MASK) >> CFG1_DA_SHIFT)
+#define DASSOC	      (DASSOC_ENC + 1)
+#define DTOTAL_BYTES  (DLINE_SIZE * DSET_SIZE * DASSOC)
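+
+/*
+ * Worked example (hypothetical Config1 value): IL = 3 gives a 16-byte
+ * I-cache line (2 << 3), IS = 2 gives 256 sets per way (32 << 3) and
+ * IA = 3 gives 4-way associativity, so ITOTAL_BYTES = 16 * 256 * 4 =
+ * 16 KiB.  The ((ISET_ENC + 1) & 0x7) term maps the encoding 7 to
+ * 32 << 0 = 32 sets, matching the architectural definition.
+ */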
+
+#ifndef C0_CONFIG2_VALUE
+# error "Static cache initialisation decisions require C0_CONFIG2_VALUE"
+#endif
+
+#ifndef C0_CONFIG3_VALUE
+# error "Static TLB initialisation decisions require C0_CONFIG3_VALUE"
+#endif
+
+#if (C0_CONFIG3_VALUE & CFG3_M) != 0
+# ifndef C0_CONFIG4_VALUE
+#  error "Static TLB/cache initialisation decisions require C0_CONFIG4_VALUE"
+# endif
+# if (C0_CONFIG4_VALUE & CFG4_M) != 0
+#  ifndef C0_CONFIG5_VALUE
+#   error "Static cache initialisation decisions require C0_CONFIG5_VALUE"
+#  endif
+#  if (C0_CONFIG5_VALUE & CFG5_L2C) != 0
+#   define MEM_MAPPED_L2C 1
+#  endif
+# endif
+#endif
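+
+/*
+ * Each Config register's M bit indicates that the next Config register is
+ * present, hence the nested checks above: Config3 must advertise Config4
+ * before C0_CONFIG4_VALUE is required, and Config4 must advertise Config5
+ * before it is examined for a memory-mapped L2 controller.
+ */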
+
+#define SLINE_ENC    ((C0_CONFIG2_VALUE & CFG2_SL_MASK) >> CFG2_SL_SHIFT)
+#define SSET_ENC     ((C0_CONFIG2_VALUE & CFG2_SS_MASK) >> CFG2_SS_SHIFT)
+#define SASSOC_ENC   ((C0_CONFIG2_VALUE & CFG2_SA_MASK) >> CFG2_SA_SHIFT)
+
+#define SLINE_SIZE   (2 << SLINE_ENC)
+#define SSET_SIZE    (64 << SSET_ENC)
+#define SASSOC	      (SASSOC_ENC + 1)
+#define STOTAL_BYTES (SLINE_SIZE * SSET_SIZE * SASSOC)
+
+#define TLINE_ENC    ((C0_CONFIG2_VALUE & CFG2_TL_MASK) >> CFG2_TL_SHIFT)
+#define TLINE_SIZE   (2 << TLINE_ENC)
+#define TSET_ENC     ((C0_CONFIG2_VALUE & CFG2_TS_MASK) >> CFG2_TS_SHIFT)
+#define TSET_SIZE    (64 << TSET_ENC)
+#define TASSOC_ENC   ((C0_CONFIG2_VALUE & CFG2_TA_MASK) >> CFG2_TA_SHIFT)
+#define TASSOC	      (TASSOC_ENC + 1)
+#define TTOTAL_BYTES (TLINE_SIZE * TSET_SIZE * TASSOC)
+
+/* TLB Macros */
+
+// TLB Type
+#define TLB_STANDARD	((C0_CONFIG0_VALUE & CFG0_MT_MASK) == CFG0_MT_TLB)
+#define TLB_DUAL	((C0_CONFIG0_VALUE & CFG0_MT_MASK) == CFG0_MT_DUAL)
+#define HAVE_TLB	(TLB_STANDARD || TLB_DUAL)
+
+// Size definitions.
+// FTLBs may be present.
+#ifdef C0_CONFIG4_VALUE
+# define FTLB_SET_ENC	((C0_CONFIG4_VALUE & CFG4_FTLBS_MASK) >> CFG4_FTLBS_SHIFT)
+# define FTLB_WAY_ENC	((C0_CONFIG4_VALUE & CFG4_FTLBW_MASK) >> CFG4_FTLBW_SHIFT)
+# if TLB_DUAL
+#  define FTLB_SETS	(1 << FTLB_SET_ENC)
+#  define FTLB_SIZE	((2 + FTLB_WAY_ENC) * FTLB_SETS)
+# else
+#  define FTLB_SETS	1
+#  define FTLB_SIZE	0
+# endif
+
+// VTLB may be present.
+# define VTLB_SIZE_ENC	((C0_CONFIG4_VALUE & CFG4_VTLBSEXT_MASK) \
+			>> CFG4_VTLBSEXT_SHIFT)
+# define VTLB_SIZE	(VTLB_SIZE_ENC << CFG1_MMUS_BITS)
+#endif
+
+// Size
+#define TLB_SIZE	((C0_CONFIG1_VALUE & CFG1_MMUS_MASK) >> CFG1_MMUS_SHIFT)
+
+// ISA < 6 relies on the Config4 MMU extension definition.
+#if __mips_isa_rev < 6
+
+#if !defined(C0_CONFIG4_VALUE) || (C0_CONFIG4_VALUE & CFG4_MMUED) == 0
+# define MMU_SIZE	(TLB_SIZE + 1)
+#elif (C0_CONFIG4_VALUE & CFG4_MMUED) == CFG4_MMUED_FTLBVEXT
+# define MMU_SIZE	(FTLB_SIZE + VTLB_SIZE + TLB_SIZE + 1)
+#elif (C0_CONFIG4_VALUE & CFG4_MMUED) == CFG4_MMUED_SIZEEXT
+# define MMUSE_ENC	((C0_CONFIG4_VALUE & CFG4_MMUSE_MASK) >> CFG4_MMUSE_SHIFT)
+# define TLB_EXT_SIZE	(MMUSE_ENC << CFG1_MMUS_BITS)
+# define MMU_SIZE	(TLB_EXT_SIZE + TLB_SIZE + 1)
+#elif (C0_CONFIG4_VALUE & CFG4_MMUED) == CFG4_MMUED_FTLB
+# define MMU_SIZE	(FTLB_SIZE + TLB_SIZE + 1)
+#endif /* C0_CONFIG4_VALUE & ...*/
+
+#else
+
+// ISA >= 6 always uses the FTLB + VTLB fields.
+#define MMU_SIZE	(FTLB_SIZE + VTLB_SIZE + TLB_SIZE + 1)
+
+#endif /* __mips_isa_rev < 6 */
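+
+/*
+ * Worked example (hypothetical Config values): Config1[MMUSize] = 63
+ * gives TLB_SIZE = 63, i.e. a 64-entry VTLB.  For a dual TLB whose
+ * Config4[MMUExtDef] selects the FTLB fields, FTLBS = 7 (128 sets) and
+ * FTLBW = 2 (4 ways) give FTLB_SIZE = (2 + 2) * 128 = 512, so
+ * MMU_SIZE = 512 + 63 + 1 = 576 entries.
+ */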
+
+
+// Invalidation
+#ifdef C0_CONFIG4_VALUE
+# define HAVE_HW_TLB_WALK	((C0_CONFIG4_VALUE & CFG4_IE_MASK) == CFG4_IE_INVALL)
+# define HAVE_SW_TLB_WALK	((C0_CONFIG4_VALUE & CFG4_IE_MASK) == CFG4_IE_INV)
+# define HAVE_EHINV_WALK	((C0_CONFIG4_VALUE & CFG4_IE_MASK) == CFG4_IE_EHINV)
+# define HAVE_NO_INV		(!(HAVE_HW_TLB_WALK || HAVE_SW_TLB_WALK || HAVE_EHINV_WALK))
+#else
+# define HAVE_NO_INV 1
+#endif
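+
+// For reference, Config4[IE] encodes the invalidation support level:
+// 0 - no invalidate instructions (the probe-and-write loop is required),
+// 1 - EHINV in EntryHi is supported, 2 - TLBINVF operates on one set at
+// a time, 3 - a single TLBINVF flushes the entire MMU.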
+
+// LPA
+#define HAVE_LPA	(C0_CONFIG3_VALUE & CFG3_LPA)
diff --git a/libgloss/mips/boot/reset.S b/libgloss/mips/boot/reset.S
new file mode 100644
index 000000000..81ce55c0c
--- /dev/null
+++ b/libgloss/mips/boot/reset.S
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _RESETCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+
+MIPS_NOMIPS16
+
+	.set push
+	MIPS_NOMICROMIPS
+
+LEAF(__reset_vector)
+	lui	k1, %hi(__cpu_init)
+	addiu	k1, %lo(__cpu_init)
+	mtc0	zero, C0_COUNT	  /* Clear CP0 Count (Used to measure boot time.) */
+	jr	k1
+	.space 32		  /* Just to cope with a quirk of MIPS Malta boards; */
+				  /* this can be deleted for anything else */
+END(__reset_vector)
+	.set pop
+
+LEAF(__cpu_init)
+
+	/*
+	 * Verify the code is here due to a reset and not an NMI. If this is
+	 * an NMI, trigger a debugger breakpoint using an sdbbp instruction.
+	 */
+
+	mfc0	k1, C0_STATUS
+	ext	k1, k1, SR_NMI_SHIFT, 1
+	beqz	k1, $Lnot_nmi
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+
+$Lnot_nmi:
+
+	/* Init CP0 Status, Count, Compare, Watch*, and Cause */
+	jal	  __init_cp0
+
+	/*
+	 * Initialise L2/L3 cache
+	 * This could be done from cached code if there is a cca override or similar
+	 */
+	jal	__init_l23cache
+
+	/* Initialize the L1 instruction cache */
+	jal	__init_icache
+
+	/*
+	 * The change of kernel-mode cacheability must be done from KSEG1.
+	 * Since this code executes from KSEG0, it jumps to KSEG1, changes K0,
+	 * and jumps back to KSEG0.
+	 */
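+	/*
+	 * E.g. a KSEG0 address such as 0x80001234 becomes its KSEG1 alias
+	 * 0xA0001234 once bit 29 is set; both decode to the same physical
+	 * address, but KSEG1 accesses are uncached.
+	 */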
+
+	lui	a2, %hi(__change_k0_cca)
+	addiu	a2, a2, %lo(__change_k0_cca)
+	li	a1, 0xf
+	ins	a2, a1, 29, 1		/* Convert to a KSEG1 address by setting bit 29 */
+	jalr	a2
+
+	.weak __init_l23cache_cached
+	lui	a2, %hi(__init_l23cache_cached)
+	addiu	a2, a2, %lo(__init_l23cache_cached)
+	beqz	a2, 1f
+	jalr	a2
+1:
+	/* Initialize the L1 data cache */
+	jal	__init_dcache
+
+	/* Initialize the TLB */
+	jal	__init_tlb
+
+	/* Allow everything else to be initialized via a hook */
+	.weak __boot_init_hook
+	lui	a2, %hi(__boot_init_hook)
+	addiu	a2, a2, %lo(__boot_init_hook)
+	beqz	a2, 1f
+	jalr	a2
+1:
+	/* Skip copy to ram when executing in place */
+	.weak __xip
+	lui	a1, %hi(__xip)
+	addiu	a1, a1, %lo(__xip)
+	bnez	a1, $Lcopy_to_ram_done
+	/* Copy code and data to RAM */
+	li	s1, 0xffffffff
+
+	/* Copy code and read-only/initialized data from FLASH to (uncached) RAM */
+	lui	a1, %hi(__flash_app_start)
+	addiu	a1, a1, %lo(__flash_app_start)
+	ins	a1, s1, 29, 1		/* Make it uncached (kseg1) */
+	lui	a2, %hi(__app_start)
+	addiu	a2, a2, %lo(__app_start)
+	ins	a2, s1, 29, 1		/* Make it uncached (kseg1) */
+	lui	a3, %hi(_edata)
+	addiu	a3, a3, %lo(_edata)
+	ins	a3, s1, 29, 1		/* Make it uncached (kseg1) */
+	beq	a2, a3, $Lcopy_to_ram_done
+$Lnext_ram_word:
+	lw	a0, 0(a1)
+	addiu	a2, a2, 4
+	addiu	a1, a1, 4
+	sw	a0, -4(a2)
+	bne	a3, a2, $Lnext_ram_word
+$Lcopy_to_ram_done:
+
+	# Prepare for eret to _start
+	lui	ra, %hi($Lall_done)	/* If _start returns then go to all_done */
+	addiu	ra, ra, %lo($Lall_done)
+	lui	t0, %hi(_start)
+	addiu	t0, t0, %lo(_start)
+	mtc0	t0, C0_ERRPC		/* Set ErrorEPC to _start */
+	ehb
+	li	a0, 0			/* UHI compliant null argument setup */
+
+	/* Return from exception will now execute the application startup code */
+	eret
+
+$Lall_done:
+	/*
+	 * If _start returns it will return to this point.
+	 * Just spin here reporting the exit.
+	 */
+	li	t9, 1			/* UHI exit operation */
+	move	a0, va0			/* Collect exit code for UHI exit */
+	sdbbp	1			/* Invoke UHI operation */
+	b	$Lall_done
+END(__cpu_init)
+
+/**************************************************************************************
+    B O O T   E X C E P T I O N   H A N D L E R S (CP0 Status[BEV] = 1)
+**************************************************************************************/
+/* NOTE: the linker script must ensure that this code starts at start + 0x200 so the exception */
+/* vectors will be addressed properly. */
+
+/* TLB refill, 32 bit task. */
+.org 0x200
+LEAF(__boot_tlb_refill)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_tlb_refill)
+
+/* XTLB refill, 64 bit task. */
+.org 0x280
+LEAF(__boot_xtlb_refill)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_xtlb_refill)
+
+/* Cache error exception. */
+.org 0x300
+LEAF(__boot_cache_error)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_cache_error)
+
+/* General exception. */
+.org 0x380
+LEAF(__boot_general_exception)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_general_exception)
+
+# If you want the above code to fit into 1KB of flash, you will need to leave out
+# the code below. It covers the debug exception, which you will normally not get.
+
+/* EJTAG Debug */
+.org 0x480
+LEAF(__boot_debug_exception)
+	PTR_MFC0  k1, C0_DEPC
+	PTR_MTC0  k1, C0_DESAVE
+	lui       k1, %hi(1f)
+	addiu     k1, %lo(1f)
+	PTR_MTC0  k1, C0_DEPC
+	ehb
+	deret
+1:	wait
+	b	  1b  /* Stay here */
+END(__boot_debug_exception)
diff --git a/libgloss/mips/boot/reset_predef.S b/libgloss/mips/boot/reset_predef.S
new file mode 100644
index 000000000..e77de514d
--- /dev/null
+++ b/libgloss/mips/boot/reset_predef.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2014-2017, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define _RESETCODE
+
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/asm.h>
+#include "predef.h"
+
+MIPS_NOMIPS16
+
+	.set push
+	MIPS_NOMICROMIPS
+
+LEAF(__reset_vector)
+	lui	k1, %hi(__cpu_init)
+	addiu	k1, %lo(__cpu_init)
+	mtc0	zero, C0_COUNT	  /* Clear CP0 Count (Used to measure boot time.) */
+	jr	k1
+	.space 32		  /* Just to cope with a quirk of MIPS Malta boards; */
+				  /* this can be deleted for anything else */
+END(__reset_vector)
+	.set pop
+
+LEAF(__cpu_init)
+
+	/*
+	 * Verify the code is here due to a reset and not an NMI. If this is
+	 * an NMI, trigger a debugger breakpoint using an sdbbp instruction.
+	 */
+
+	mfc0	k1, C0_STATUS
+	ext	k1, k1, SR_NMI_SHIFT, 1
+	beqz	k1, $Lnot_nmi
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+
+$Lnot_nmi:
+
+#ifndef SKIP_CORE_CHECK
+	jal	__core_check
+#endif
+
+	/* Init CP0 Status, Count, Compare, Watch*, and Cause */
+	jal	__init_cp0
+
+#if (defined(SLINE_ENC) && SLINE_ENC != 0) || (defined(MEM_MAPPED_L2C) && MEM_MAPPED_L2C != 0)
+	/*
+	 * Initialise L2/L3 cache
+	 * This could be done from cached code if there is a cca override or similar
+	 */
+	jal	 __init_l23cache
+#endif
+
+#if defined(ILINE_ENC) && ILINE_ENC != 0
+	/* Initialize the L1 instruction cache */
+	jal	  __init_icache
+
+	/*
+	 * The change of kernel-mode cacheability must be done from KSEG1.
+	 * Since this code executes from KSEG0, it jumps to KSEG1, changes K0,
+	 * and jumps back to KSEG0.
+	 */
+
+	lui	a2, %hi(__change_k0_cca)
+	addiu	a2, a2, %lo(__change_k0_cca)
+	li	a1, 0xf
+	ins	a2, a1, 29, 1		/* Convert to a KSEG1 address by setting bit 29 */
+	jalr	a2
+#endif
+
+#if (defined(SLINE_ENC) && SLINE_ENC != 0) || (defined(MEM_MAPPED_L2C) && MEM_MAPPED_L2C != 0)
+	/* Support initialising L2 with L1 cache enabled */
+	jal	__init_l23cache_cached
+#endif
+
+#if defined(DLINE_ENC) && DLINE_ENC != 0
+	/* Initialize the L1 data cache */
+	jal	__init_dcache
+#endif
+
+#if defined(HAVE_TLB) && HAVE_TLB
+	/* Initialize the TLB */
+	jal	__init_tlb
+#endif
+
+	/* Allow everything else to be initialized via a hook */
+	.weak __boot_init_hook
+	lui	a2, %hi(__boot_init_hook)
+	addiu	a2, a2, %lo(__boot_init_hook)
+	beqz	a2, 1f
+	jalr	a2
+1:
+	/* Skip copy to ram when executing in place */
+	.weak __xip
+	lui	a1, %hi(__xip)
+	addiu	a1, a1, %lo(__xip)
+	bnez	a1, $Lcopy_to_ram_done
+
+	/* Copy code and data to RAM */
+	li	s1, 0xffffffff
+
+	/* Copy code and read-only/initialized data from FLASH to (uncached) RAM */
+	lui	a1, %hi(__flash_app_start)
+	addiu	a1, a1, %lo(__flash_app_start)
+#if defined(ILINE_ENC) && ILINE_ENC != 0
+	ins	a1, s1, 29, 1		/* Make it uncached (kseg1) */
+#endif
+	lui	a2, %hi(__app_start)
+	addiu	a2, a2, %lo(__app_start)
+#if defined(ILINE_ENC) && ILINE_ENC != 0
+	ins	a2, s1, 29, 1		/* Make it uncached (kseg1) */
+#endif
+	lui	a3, %hi(_edata)
+	addiu	a3, a3, %lo(_edata)
+#if defined(ILINE_ENC) && ILINE_ENC != 0
+	ins	a3, s1, 29, 1		/* Make it uncached (kseg1) */
+#endif
+	beq	a2, a3, $Lcopy_to_ram_done
+$Lnext_ram_word:
+	lw	a0, 0(a1)
+	addiu	a2, a2, 4
+	addiu	a1, a1, 4
+	sw	a0, -4(a2)
+	bne	a3, a2, $Lnext_ram_word
+$Lcopy_to_ram_done:
+
+	# Prepare for eret to _start
+	lui	ra, %hi($Lall_done)	/* If _start returns then go to all_done */
+	addiu	ra, ra, %lo($Lall_done)
+	lui	t0, %hi(_start)
+	addiu	t0, t0, %lo(_start)
+	mtc0	t0, C0_ERRPC		/* Set ErrorEPC to _start */
+	ehb
+	li	a0, 0			/* UHI compliant null argument setup */
+
+	/* Return from exception will now execute the application startup code */
+	eret
+
+$Lall_done:
+	/*
+	 * If _start returns it will return to this point.
+	 * Just spin here reporting the exit.
+	 */
+	li	t9, 1			/* UHI exit operation */
+	move	a0, va0			/* Collect exit code for UHI exit */
+	sdbbp	1			/* Invoke UHI operation */
+	b	$Lall_done
+
+END(__cpu_init)
+
+/**************************************************************************************
+    B O O T   E X C E P T I O N   H A N D L E R S (CP0 Status[BEV] = 1)
+**************************************************************************************/
+/* NOTE: the linker script must ensure that this code starts at start + 0x200 so the exception */
+/* vectors will be addressed properly. */
+
+/* TLB refill, 32 bit task. */
+.org 0x200
+LEAF(__boot_tlb_refill)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_tlb_refill)
+
+/* XTLB refill, 64 bit task. */
+.org 0x280
+LEAF(__boot_xtlb_refill)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_xtlb_refill)
+
+/* Cache error exception. */
+.org 0x300
+LEAF(__boot_cache_error)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_cache_error)
+
+/* General exception. */
+.org 0x380
+LEAF(__boot_general_exception)
+	move	k0, t9			/* Preserve t9 */
+	move	k1, a0			/* Preserve a0 */
+	li	t9, 15			/* UHI exception operation */
+	li	a0, 0			/* Use hard register context */
+	sdbbp	1			/* Invoke UHI operation */
+END(__boot_general_exception)
+
+# If you want the above code to fit into 1KB of flash, you will need to leave out
+# the code below. It covers the debug exception, which you will normally not get.
+
+/* EJTAG Debug */
+.org 0x480
+LEAF(__boot_debug_exception)
+	PTR_MFC0  k1, C0_DEPC
+	PTR_MTC0  k1, C0_DESAVE
+	lui       k1, %hi(1f)
+	addiu     k1, %lo(1f)
+	PTR_MTC0  k1, C0_DEPC
+	ehb
+	deret
+1:	wait
+	b	  1b  /* Stay here */
+END(__boot_debug_exception)
diff --git a/libgloss/mips/bootcode.ld b/libgloss/mips/bootcode.ld
new file mode 100644
index 000000000..5c1bcbff2
--- /dev/null
+++ b/libgloss/mips/bootcode.ld
@@ -0,0 +1,14 @@
+/* This script forces the inclusion of boot code by creating references
+   to all the initialisation functions.  These early references also
+   ensure custom versions of code are pulled out of user supplied
+   objects and libraries before default implementations.  */
+
+EXTERN (__reset_vector);
+EXTERN (__init_cp0);
+EXTERN (__init_l23cache);
+EXTERN (__init_icache);
+EXTERN (__change_k0_cca);
+EXTERN (__init_dcache);
+EXTERN (__init_tlb);
+EXTERN (__boot_init_hook);
+PROVIDE (__boot_init_hook = 0);
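+
+/* A possible use (illustrative only; the build system decides how this
+   script is pulled in): pass it as an extra input on the link line so
+   GNU ld reads it as an implicit linker script, e.g.
+
+       mips-elf-gcc -o app.elf crt0.o app.o -Wl,bootcode.ld
+
+   The EXTERN references then force the boot objects to be linked in even
+   though nothing else refers to them, and the PROVIDE gives
+   __boot_init_hook a null default when no user hook is supplied.  */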
-- 
2.25.1


