author    Sandrine Bailleux <sandrine.bailleux@arm.com>  2018-10-09 11:12:55 +0200
committer Sandrine Bailleux <sandrine.bailleux@arm.com>  2018-10-10 12:34:34 +0200
commit    3cd87d77947ec4fc04440268ed122b4ed81c7781 (patch)
tree      78fdee12b026b931029e434f29b4fe09835fe4c9 /lib
Trusted Firmware-A Tests, version 2.0 (tag: v2.0)

This is the first public version of the tests for the Trusted Firmware-A
project. Please see the documentation provided in the source tree for more
details.

Change-Id: I6f3452046a1351ac94a71b3525c30a4ca8db7867
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
Co-authored-by: amobal01 <amol.balasokamble@arm.com>
Co-authored-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Co-authored-by: Asha R <asha.r@arm.com>
Co-authored-by: Chandni Cherukuri <chandni.cherukuri@arm.com>
Co-authored-by: David Cunado <david.cunado@arm.com>
Co-authored-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Co-authored-by: Douglas Raillard <douglas.raillard@arm.com>
Co-authored-by: dp-arm <dimitris.papastamos@arm.com>
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Co-authored-by: Jonathan Wright <jonathan.wright@arm.com>
Co-authored-by: Kévin Petit <kevin.petit@arm.com>
Co-authored-by: Roberto Vargas <roberto.vargas@arm.com>
Co-authored-by: Sathees Balya <sathees.balya@arm.com>
Co-authored-by: Shawon Roy <Shawon.Roy@arm.com>
Co-authored-by: Soby Mathew <soby.mathew@arm.com>
Co-authored-by: Thomas Abraham <thomas.abraham@arm.com>
Co-authored-by: Vikram Kanigiri <vikram.kanigiri@arm.com>
Co-authored-by: Yatharth Kochar <yatharth.kochar@arm.com>
Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/cache_helpers.S                        | 217
-rw-r--r--  lib/aarch32/exception_stubs.S                      | 22
-rw-r--r--  lib/aarch32/misc_helpers.S                         | 94
-rw-r--r--  lib/aarch64/cache_helpers.S                        | 203
-rw-r--r--  lib/aarch64/exception_stubs.S                      | 46
-rw-r--r--  lib/aarch64/misc_helpers.S                         | 138
-rw-r--r--  lib/compiler-rt/LICENSE.TXT                        | 91
-rw-r--r--  lib/compiler-rt/builtins/arm/aeabi_uldivmod.S      | 46
-rw-r--r--  lib/compiler-rt/builtins/assembly.h                | 204
-rw-r--r--  lib/compiler-rt/builtins/ctzdi2.c                  | 35
-rw-r--r--  lib/compiler-rt/builtins/int_endianness.h          | 116
-rw-r--r--  lib/compiler-rt/builtins/int_lib.h                 | 124
-rw-r--r--  lib/compiler-rt/builtins/int_math.h                | 114
-rw-r--r--  lib/compiler-rt/builtins/int_types.h               | 164
-rw-r--r--  lib/compiler-rt/builtins/udivmoddi4.c              | 231
-rw-r--r--  lib/compiler-rt/compiler-rt.mk                     | 13
-rw-r--r--  lib/delay/delay.c                                  | 32
-rw-r--r--  lib/events/events.c                                | 85
-rw-r--r--  lib/extensions/amu/aarch32/amu.c                   | 37
-rw-r--r--  lib/extensions/amu/aarch32/amu_helpers.S           | 105
-rw-r--r--  lib/extensions/amu/aarch64/amu.c                   | 37
-rw-r--r--  lib/extensions/amu/aarch64/amu_helpers.S           | 112
-rw-r--r--  lib/irq/irq.c                                      | 204
-rw-r--r--  lib/locks/aarch32/spinlock.S                       | 37
-rw-r--r--  lib/locks/aarch64/spinlock.S                       | 33
-rw-r--r--  lib/power_management/hotplug/hotplug.c             | 307
-rw-r--r--  lib/power_management/suspend/aarch32/asm_tftf_suspend.S | 95
-rw-r--r--  lib/power_management/suspend/aarch64/asm_tftf_suspend.S | 142
-rw-r--r--  lib/power_management/suspend/suspend_private.h     | 96
-rw-r--r--  lib/power_management/suspend/tftf_suspend.c        | 127
-rw-r--r--  lib/psci/psci.c                                    | 341
-rw-r--r--  lib/sdei/sdei.c                                    | 221
-rw-r--r--  lib/semihosting/aarch32/semihosting_call.S         | 14
-rw-r--r--  lib/semihosting/aarch64/semihosting_call.S         | 14
-rw-r--r--  lib/semihosting/semihosting.c                      | 214
-rw-r--r--  lib/smc/aarch32/asm_smc.S                          | 36
-rw-r--r--  lib/smc/aarch32/smc.c                              | 19
-rw-r--r--  lib/smc/aarch64/asm_smc.S                          | 50
-rw-r--r--  lib/smc/aarch64/smc.c                              | 29
-rw-r--r--  lib/stdlib/abort.c                                 | 16
-rw-r--r--  lib/stdlib/assert.c                                | 17
-rw-r--r--  lib/stdlib/mem.c                                   | 97
-rw-r--r--  lib/stdlib/printf.c                                | 36
-rw-r--r--  lib/stdlib/putchar.c                               | 24
-rw-r--r--  lib/stdlib/puts.c                                  | 31
-rw-r--r--  lib/stdlib/rand.c                                  | 65
-rw-r--r--  lib/stdlib/strchr.c                                | 52
-rw-r--r--  lib/stdlib/strcmp.c                                | 51
-rw-r--r--  lib/stdlib/strlen.c                                | 45
-rw-r--r--  lib/stdlib/strncmp.c                               | 52
-rw-r--r--  lib/stdlib/strncpy.c                               | 61
-rw-r--r--  lib/stdlib/subr_prf.c                              | 548
-rw-r--r--  lib/trusted_os/trusted_os.c                        | 33
-rw-r--r--  lib/utils/mp_printf.c                              | 72
-rw-r--r--  lib/utils/uuid.c                                   | 60
-rw-r--r--  lib/xlat_tables_v2/aarch32/enable_mmu.S            | 120
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c      | 231
-rw-r--r--  lib/xlat_tables_v2/aarch64/enable_mmu.S            | 95
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c      | 278
-rw-r--r--  lib/xlat_tables_v2/xlat_tables.mk                  | 12
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c           | 199
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c              | 1157
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h           | 103
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c             | 565
64 files changed, 8265 insertions, 0 deletions
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 000000000..810af0f0d
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
+ /* Exit early if size is zero */
+ cmp r1, #0
+ beq exit_loop_\op
+ dcache_line_size r2, r3
+ add r1, r0, r1
+ sub r3, r2, #1
+ bic r0, r0, r3
+loop_\op:
+ stcopr r0, \coproc, \opc1, \CRn, \CRm, \opc2
+ add r0, r0, r2
+ cmp r0, r1
+ blo loop_\op
+ dsb sy
+exit_loop_\op:
+ bx lr
+.endm
+
+ /* ------------------------------------------
+ * Clean+Invalidate from base address till
+ * size. 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva cimvac, DCCIMVAC
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from base address till size.
+ * 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cmvac, DCCMVAC
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from base address till
+ * size. 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva imvac, DCIMVAC
+endfunc inv_dcache_range
+
+ /* ----------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * r1: The cache level to begin operation from
+ * r2: clidr_el1
+ * r3: The last cache level to operate on
+ * and will carry out the operation on each data cache from level 0
+ * to the level in r3 in sequence
+ *
+ * The dcsw_op macro sets up the r2 and r3 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ----------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ ldcopr r2, CLIDR
+ ubfx r3, r2, \shift, \fw
+ lsl r3, r3, \ls
+ mov r1, #0
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ push {r4-r12,lr}
+ adr r11, dcsw_loop_table // compute cache op based on the operation type
+ add r6, r11, r0, lsl #3 // cache op is 2x32-bit instructions
+loop1:
+ add r10, r1, r1, LSR #1 // Work out 3x current cache level
+ mov r12, r2, LSR r10 // extract cache type bits from clidr
+ and r12, r12, #7 // mask the bits for current cache only
+ cmp r12, #2 // see what cache we have at this level
+ blo level_done // no cache or only instruction cache at this level
+
+ stcopr r1, CSSELR // select current cache level in csselr
+	isb				// isb to sync the new csselr and ccsidr
+ ldcopr r12, CCSIDR // read the new ccsidr
+ and r10, r12, #7 // extract the length of the cache lines
+ add r10, r10, #4 // add 4 (r10 = line length offset)
+ ubfx r4, r12, #3, #10 // r4 = maximum way number (right aligned)
+ clz r5, r4 // r5 = the bit position of the way size increment
+ mov r9, r4 // r9 working copy of the aligned max way number
+
+loop2:
+ ubfx r7, r12, #13, #15 // r7 = max set number (right aligned)
+
+loop3:
+ orr r0, r1, r9, LSL r5 // factor in the way number and cache level into r0
+ orr r0, r0, r7, LSL r10 // factor in the set number
+
+ blx r6
+ subs r7, r7, #1 // decrement the set number
+ bhs loop3
+ subs r9, r9, #1 // decrement the way number
+ bhs loop2
+level_done:
+ add r1, r1, #2 // increment the cache number
+ cmp r3, r1
+ dsb sy // ensure completion of previous cache maintenance instruction
+ bhi loop1
+
+ mov r6, #0
+ stcopr r6, CSSELR //select cache level 0 in csselr
+ dsb sy
+ isb
+ pop {r4-r12,pc}
+
+dcsw_loop_table:
+ stcopr r0, DCISW
+ bx lr
+ stcopr r0, DCCISW
+ bx lr
+ stcopr r0, DCCSW
+ bx lr
+
+endfunc do_dcsw_op
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way till PoU.
+ *
+ * The function requires :
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way till PoC.
+ *
+ * The function requires :
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ ldcopr r2, CLIDR
+ mov r3, \level
+ sub r1, r3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
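The set/way walk implemented by do_dcsw_op above corresponds to the following
illustrative C sketch (not part of the committed sources; the accessor
functions are hypothetical stand-ins for the ldcopr/stcopr sequences):

	#include <stdint.h>

	/* Hypothetical accessors standing in for ldcopr/stcopr. */
	extern uint32_t read_clidr(void);
	extern void write_csselr(uint32_t val);
	extern uint32_t read_ccsidr(void);
	extern void dc_setway_op(uint32_t setway); /* DCISW/DCCSW/DCCISW, per r0 */

	/* 'last' is the last cache level to operate on, encoded as (level << 1),
	 * exactly as r3 is set up by the dcsw_op and dcsw_op_level macros. */
	void dcsw_op_sketch(uint32_t last)
	{
		for (uint32_t lvl = 0; lvl < last; lvl += 2) {
			/* CLIDR holds 3 bits of cache type per level */
			uint32_t type = (read_clidr() >> ((lvl >> 1) * 3)) & 7;
			if (type < 2)		/* no cache, or I-cache only */
				continue;
			write_csselr(lvl);
			uint32_t ccsidr = read_ccsidr();
			uint32_t line_shift = (ccsidr & 7) + 4;
			uint32_t max_way = (ccsidr >> 3) & 0x3ff;
			uint32_t max_set = (ccsidr >> 13) & 0x7fff;
			uint32_t way_shift = __builtin_clz(max_way);
			for (uint32_t way = max_way + 1; way-- > 0; )
				for (uint32_t set = max_set + 1; set-- > 0; )
					dc_setway_op((way << way_shift) |
						     (set << line_shift) | lvl);
		}
		write_csselr(0);		/* back to cache level 0 */
	}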
diff --git a/lib/aarch32/exception_stubs.S b/lib/aarch32/exception_stubs.S
new file mode 100644
index 000000000..ed48da2d0
--- /dev/null
+++ b/lib/aarch32/exception_stubs.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * Simplistic exceptions vector table.
+ * All entries spin, which means all types of exceptions are unrecoverable.
+ */
+ .global exception_stubs
+vector_base exception_stubs
+ b . /* Not used */
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Hyp trap */
+ b . /* IRQ */
+ b . /* FIQ */
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
new file mode 100644
index 000000000..ab37be9c3
--- /dev/null
+++ b/lib/aarch32/misc_helpers.S
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .globl zeromem
+ .globl memcpy4
+ .globl disable_mmu_icache
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address and length must be 4-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+#if ENABLE_ASSERTIONS
+ tst r0, #0x3
+ ASM_ASSERT(eq)
+ tst r1, #0x3
+ ASM_ASSERT(eq)
+#endif
+ add r2, r0, r1
+ mov r1, #0
+z_loop:
+ cmp r2, r0
+ beq z_end
+ str r1, [r0], #4
+ b z_loop
+z_end:
+ bx lr
+endfunc zeromem
+
+/* --------------------------------------------------------------------------
+ * void memcpy4(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 4-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy4
+#if ENABLE_ASSERTIONS
+ orr r3, r0, r1
+ tst r3, #0x3
+ ASM_ASSERT(eq)
+#endif
+/* copy 4 bytes at a time */
+m_loop4:
+ cmp r2, #4
+ blt m_loop1
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ sub r2, r2, #4
+ b m_loop4
+/* copy byte per byte */
+m_loop1:
+ cmp r2,#0
+ beq m_end
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ subs r2, r2, #1
+ bne m_loop1
+m_end:
+ bx lr
+endfunc memcpy4
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU in Secure State
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu
+ mov r1, #(HSCTLR_M_BIT | HSCTLR_C_BIT)
+do_disable_mmu:
+ ldcopr r0, HSCTLR
+ bic r0, r0, r1
+ stcopr r0, HSCTLR
+ isb // ensure MMU is off
+ dsb sy
+ bx lr
+endfunc disable_mmu
+
+
+func disable_mmu_icache
+ ldr r1, =(HSCTLR_M_BIT | HSCTLR_C_BIT | HSCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache
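For reference, memcpy4 behaves like this illustrative C equivalent (not part
of the committed sources): word-sized copies while at least four bytes remain,
then a byte-at-a-time tail.

	#include <stdint.h>

	void memcpy4_sketch(void *dest, const void *src, unsigned int length)
	{
		/* Both addresses assumed 4-byte aligned, areas non-overlapping. */
		uint32_t *d = dest;
		const uint32_t *s = src;

		while (length >= 4) {			/* m_loop4 */
			*d++ = *s++;
			length -= 4;
		}

		uint8_t *db = (uint8_t *)d;		/* m_loop1 */
		const uint8_t *sb = (const uint8_t *)s;
		while (length-- > 0)
			*db++ = *sb++;
	}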
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
new file mode 100644
index 000000000..9c40b9db8
--- /dev/null
+++ b/lib/aarch64/cache_helpers.S
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+loop_\op:
+ dc \op, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb sy
+exit_loop_\op:
+ ret
+.endm
+ /* ------------------------------------------
+ * Clean+Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva civac
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from base address till size.
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva ivac
+endfunc inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * x10: The cache level to begin operation from
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ mov x10, xzr
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lo level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+	isb				// isb to sync the new csselr and ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.hs loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.hs loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.hi loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+endfunc do_dcsw_op
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ mrs x9, clidr_el1
+ mov x3, \level
+ sub x10, x3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/lib/aarch64/exception_stubs.S b/lib/aarch64/exception_stubs.S
new file mode 100644
index 000000000..0508fe573
--- /dev/null
+++ b/lib/aarch64/exception_stubs.S
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * Simplistic exceptions vector table.
+ * All entries spin, which means all types of exceptions are unrecoverable.
+ */
+ .global exception_stubs
+vector_base exception_stubs
+vector_entry SynchronousExceptionSP0
+ b .
+vector_entry IrqSP0
+ b .
+vector_entry FiqSP0
+ b .
+vector_entry SErrorSP0
+ b .
+vector_entry SynchronousExceptionSPx
+ b .
+vector_entry IrqSPx
+ b .
+vector_entry FiqSPx
+ b .
+vector_entry SErrorSPx
+ b .
+vector_entry SynchronousExceptionA64
+ b .
+vector_entry IrqA64
+ b .
+vector_entry FiqA64
+ b .
+vector_entry SErrorA64
+ b .
+vector_entry SynchronousExceptionA32
+ b .
+vector_entry IrqA32
+ b .
+vector_entry FiqA32
+ b .
+vector_entry SErrorA32
+ b .
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
new file mode 100644
index 000000000..6acaa8689
--- /dev/null
+++ b/lib/aarch64/misc_helpers.S
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .globl get_afflvl_shift
+ .globl mpidr_mask_lower_afflvls
+ .globl eret
+ .globl smc
+
+ .globl zeromem16
+ .globl memcpy16
+
+ .globl disable_mmu
+ .globl disable_mmu_icache
+
+func get_afflvl_shift
+ cmp x0, #3
+ cinc x0, x0, eq
+ mov x1, #MPIDR_AFFLVL_SHIFT
+ lsl x0, x0, x1
+ ret
+endfunc get_afflvl_shift
+
+func mpidr_mask_lower_afflvls
+ cmp x1, #3
+ cinc x1, x1, eq
+ mov x2, #MPIDR_AFFLVL_SHIFT
+ lsl x2, x1, x2
+ lsr x0, x0, x2
+ lsl x0, x0, x2
+ ret
+endfunc mpidr_mask_lower_afflvls
+
+
+func eret
+ eret
+endfunc eret
+
+func smc
+ smc #0
+endfunc smc
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem16
+#if ENABLE_ASSERTIONS
+ tst x0, #0xf
+ ASM_ASSERT(eq)
+#endif
+ add x2, x0, x1
+/* zero 16 bytes at a time */
+z_loop16:
+ sub x3, x2, x0
+ cmp x3, #16
+ b.lt z_loop1
+ stp xzr, xzr, [x0], #16
+ b z_loop16
+/* zero byte per byte */
+z_loop1:
+ cmp x0, x2
+ b.eq z_end
+ strb wzr, [x0], #1
+ b z_loop1
+z_end:
+ ret
+endfunc zeromem16
+
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy16
+#if ENABLE_ASSERTIONS
+ orr x3, x0, x1
+ tst x3, #0xf
+ ASM_ASSERT(eq)
+#endif
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy byte per byte */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end:
+ ret
+endfunc memcpy16
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at the current exception level (NS-EL1 or EL2)
+ * This is implemented in assembler to ensure that the data cache is cleaned
+ * and invalidated after the MMU is disabled without any intervening cacheable
+ * data accesses
+ * ---------------------------------------------------------------------------
+ */
+func disable_mmu
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ asm_read_sctlr_el1_or_el2
+ bic x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb /* ensure MMU is off */
+ mov x0, #DCCISW /* DCache clean and invalidate */
+ b dcsw_op_all
+endfunc disable_mmu
+
+func disable_mmu_icache
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache
+
+/* Need this label for asm_read/write_sctlr_el1_or_el2 */
+dead:
+ b dead
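A worked example of mpidr_mask_lower_afflvls above, as an illustrative C
sketch (assumes MPIDR_AFFLVL_SHIFT == 3, i.e. 8-bit affinity fields; note
that get_afflvl_shift maps level 3 to a shift of 32 because Aff3 lives in
MPIDR bits [39:32]):

	#include <stdint.h>

	uint64_t mask_lower_afflvls_sketch(uint64_t mpidr, unsigned int afflvl)
	{
		unsigned int shift = afflvl << 3;	/* level 1 -> 8 bits */
		/* e.g. 0x80000102 at level 1 -> 0x80000100 (Aff0 cleared) */
		return (mpidr >> shift) << shift;
	}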
diff --git a/lib/compiler-rt/LICENSE.TXT b/lib/compiler-rt/LICENSE.TXT
new file mode 100644
index 000000000..0134694e4
--- /dev/null
+++ b/lib/compiler-rt/LICENSE.TXT
@@ -0,0 +1,91 @@
+==============================================================================
+compiler_rt License
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2018 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
diff --git a/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
new file mode 100644
index 000000000..be343b6bc
--- /dev/null
+++ b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
@@ -0,0 +1,46 @@
+//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { uint64_t quot; uint64_t rem; }
+// __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
+// uint64_t rem, quot;
+// quot = __udivmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+ push {r6, lr}
+ sub sp, sp, #16
+ add r6, sp, #8
+ str r6, [sp]
+#if defined(__MINGW32__)
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
+#endif
+ bl SYMBOL_NAME(__udivmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r6, pc}
+END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
diff --git a/lib/compiler-rt/builtins/assembly.h b/lib/compiler-rt/builtins/assembly.h
new file mode 100644
index 000000000..3f5e59b25
--- /dev/null
+++ b/lib/compiler-rt/builtins/assembly.h
@@ -0,0 +1,204 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN(name) .private_extern name
+#define LOCAL_LABEL(name) L_##name
+// tell linker it can break up file at label boundaries
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
+#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#elif defined(__ELF__)
+
+#define HIDDEN(name) .hidden name
+#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
+#if defined(__arm__)
+#define SYMBOL_IS_FUNC(name) .type name,%function
+#else
+#define SYMBOL_IS_FUNC(name) .type name,@function
+#endif
+#define CONST_SECTION .section .rodata
+
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+ defined(__linux__)
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
+
+#else // !__APPLE__ && !__ELF__
+
+#define HIDDEN(name)
+#define LOCAL_LABEL(name) .L ## name
+#define FILE_LEVEL_DIRECTIVE
+#define SYMBOL_IS_FUNC(name) \
+ .def name SEPARATOR \
+ .scl 2 SEPARATOR \
+ .type 32 SEPARATOR \
+ .endef
+#define CONST_SECTION .section .rdata,"rd"
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#endif
+
+#if defined(__arm__)
+
+/*
+ * Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
+ * - for '-mthumb -march=armv6' compiler defines '__thumb__'
+ * - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
+ */
+#if defined(__thumb2__) || defined(__thumb__)
+#define DEFINE_CODE_STATE .thumb SEPARATOR
+#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
+#if defined(__thumb2__)
+#define USE_THUMB_2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#define ITE(cond) ite cond
+#else
+#define USE_THUMB_1
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif // defined(__thumb2__)
+#else // !defined(__thumb2__) && !defined(__thumb__)
+#define DEFINE_CODE_STATE .arm SEPARATOR
+#define DECLARE_FUNC_ENCODING
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif
+
+#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
+#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
+#endif
+
+#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
+#define ARM_HAS_BX
+#endif
+#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
+ (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
+#define __ARM_FEATURE_CLZ
+#endif
+
+#ifdef ARM_HAS_BX
+#define JMP(r) bx r
+#define JMPc(r, c) bx##c r
+#else
+#define JMP(r) mov pc, r
+#define JMPc(r, c) mov##c pc, r
+#endif
+
+// pop {pc} can't switch Thumb mode on ARMv4T
+#if __ARM_ARCH >= 5
+#define POP_PC() pop {pc}
+#else
+#define POP_PC() \
+ pop {ip}; \
+ JMP(ip)
+#endif
+
+#if defined(USE_THUMB_2)
+#define WIDE(op) op.w
+#else
+#define WIDE(op) op
+#endif
+#else // !defined(__arm)
+#define DECLARE_FUNC_ENCODING
+#define DEFINE_CODE_STATE
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ .thumb_func SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ HIDDEN(name) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#ifdef __ELF__
+#define END_COMPILERRT_FUNCTION(name) \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#else
+#define END_COMPILERRT_FUNCTION(name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
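As an illustration (an assumption about a typical build, not taken from the
header itself): on an ELF/Arm target compiled for ARM state, with default
visibility and an empty __USER_LABEL_PREFIX__,
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod) expands to roughly:

	.arm ;
	.globl __aeabi_uldivmod ;
	.type __aeabi_uldivmod,%function ;
__aeabi_uldivmod: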
diff --git a/lib/compiler-rt/builtins/ctzdi2.c b/lib/compiler-rt/builtins/ctzdi2.c
new file mode 100644
index 000000000..eecde2971
--- /dev/null
+++ b/lib/compiler-rt/builtins/ctzdi2.c
@@ -0,0 +1,35 @@
+/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __ctzdi2 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Returns: the number of trailing 0-bits */
+
+#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__))
+/* gcc resolves __builtin_ctz -> __ctzdi2 leading to infinite recursion */
+#define __builtin_ctz(a) __ctzsi2(a)
+extern si_int __ctzsi2(si_int);
+#endif
+
+/* Precondition: a != 0 */
+
+COMPILER_RT_ABI si_int
+__ctzdi2(di_int a)
+{
+ dwords x;
+ x.all = a;
+ const si_int f = -(x.s.low == 0);
+ return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
+ (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
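The f = -(x.s.low == 0) line is a branchless word select: f is all-ones when
the low word is zero, so the high word is scanned and 32 is added; otherwise
f is zero and the low word is scanned. An illustrative check (assumes di_int
is a 64-bit long long on the host and that this builtin is linked in):

	#include <assert.h>

	extern int __ctzdi2(long long a);

	int main(void)
	{
		assert(__ctzdi2(0x8LL) == 3);		/* low word used */
		assert(__ctzdi2(0x100000000LL) == 32);	/* high word, +32 */
		return 0;
	}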
diff --git a/lib/compiler-rt/builtins/int_endianness.h b/lib/compiler-rt/builtins/int_endianness.h
new file mode 100644
index 000000000..e2586c56b
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_endianness.h
@@ -0,0 +1,116 @@
+/* ===-- int_endianness.h - configuration header for compiler-rt ------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_ENDIANNESS_H
+#define INT_ENDIANNESS_H
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ defined(__ORDER_LITTLE_ENDIAN__)
+
+/* Clang and GCC provide built-in endianness definitions. */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* __BYTE_ORDER__ */
+
+#else /* Compilers other than Clang or GCC. */
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/byteorder.h>
+
+#if defined(_BIG_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif defined(_LITTLE_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#else /* !_LITTLE_ENDIAN */
+#error "unknown endianness"
+#endif /* !_LITTLE_ENDIAN */
+
+#endif /* Solaris and AuroraUX. */
+
+/* .. */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \
+ defined(__minix)
+#include <sys/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* _BYTE_ORDER */
+
+#endif /* *BSD */
+
+#if defined(__OpenBSD__)
+#include <machine/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* _BYTE_ORDER */
+
+#endif /* OpenBSD */
+
+/* .. */
+
+/* Mac OSX has __BIG_ENDIAN__ or __LITTLE_ENDIAN__ automatically set by the
+ * compiler (at least with GCC) */
+#if defined(__APPLE__) || defined(__ellcc__ )
+
+#ifdef __BIG_ENDIAN__
+#if __BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#endif
+#endif /* __BIG_ENDIAN__ */
+
+#ifdef __LITTLE_ENDIAN__
+#if __LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif
+#endif /* __LITTLE_ENDIAN__ */
+
+#endif /* Mac OSX */
+
+/* .. */
+
+#if defined(_WIN32)
+
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+
+#endif /* Windows */
+
+#endif /* Clang or GCC. */
+
+/* . */
+
+#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
+#error Unable to determine endian
+#endif /* Check we found an endianness correctly. */
+
+#endif /* INT_ENDIANNESS_H */
diff --git a/lib/compiler-rt/builtins/int_lib.h b/lib/compiler-rt/builtins/int_lib.h
new file mode 100644
index 000000000..724d5a4fe
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_lib.h
@@ -0,0 +1,124 @@
+/* ===-- int_lib.h - configuration header for compiler-rt -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INT_LIB_H
+#define INT_LIB_H
+
+/* Assumption: Signed integral is 2's complement. */
+/* Assumption: Right shift of signed negative is arithmetic shift. */
+/* Assumption: Endianness is little or big (not mixed). */
+
+#if defined(__ELF__)
+#define FNALIAS(alias_name, original_name) \
+ void alias_name() __attribute__((__alias__(#original_name)))
+#define COMPILER_RT_ALIAS(aliasee) __attribute__((__alias__(#aliasee)))
+#else
+#define FNALIAS(alias, name) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#define COMPILER_RT_ALIAS(aliasee) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#endif
+
+/* ABI macro definitions */
+
+#if __ARM_EABI__
+# ifdef COMPILER_RT_ARMHF_TARGET
+# define COMPILER_RT_ABI
+# else
+# define COMPILER_RT_ABI __attribute__((__pcs__("aapcs")))
+# endif
+#else
+# define COMPILER_RT_ABI
+#endif
+
+#define AEABI_RTABI __attribute__((__pcs__("aapcs")))
+
+#ifdef _MSC_VER
+#define ALWAYS_INLINE __forceinline
+#define NOINLINE __declspec(noinline)
+#define NORETURN __declspec(noreturn)
+#define UNUSED
+#else
+#define ALWAYS_INLINE __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define UNUSED __attribute__((unused))
+#endif
+
+#include <sys/limits.h>
+#include <sys/stdint.h>
+#include <sys/types.h>
+
+/* Include the commonly used internal type definitions. */
+#include "int_types.h"
+
+COMPILER_RT_ABI si_int __paritysi2(si_int a);
+COMPILER_RT_ABI si_int __paritydi2(di_int a);
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b);
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b);
+COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d);
+
+COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int* rem);
+COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int* rem);
+#ifdef CRT_HAS_128BIT
+COMPILER_RT_ABI si_int __clzti2(ti_int a);
+COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
+#endif
+
+/* Definitions for builtins unavailable on MSVC */
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+
+uint32_t __inline __builtin_ctz(uint32_t value) {
+ unsigned long trailing_zero = 0;
+ if (_BitScanForward(&trailing_zero, value))
+ return trailing_zero;
+ return 32;
+}
+
+uint32_t __inline __builtin_clz(uint32_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse(&leading_zero, value))
+ return 31 - leading_zero;
+ return 32;
+}
+
+#if defined(_M_ARM) || defined(_M_X64)
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse64(&leading_zero, value))
+ return 63 - leading_zero;
+ return 64;
+}
+#else
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ if (value == 0)
+ return 64;
+ uint32_t msh = (uint32_t)(value >> 32);
+ uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+ if (msh != 0)
+ return __builtin_clz(msh);
+ return 32 + __builtin_clz(lsh);
+}
+#endif
+
+#define __builtin_clzl __builtin_clzll
+#endif /* defined(_MSC_VER) && !defined(__clang__) */
+
+#endif /* INT_LIB_H */
diff --git a/lib/compiler-rt/builtins/int_math.h b/lib/compiler-rt/builtins/int_math.h
new file mode 100644
index 000000000..fc81fb7f0
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_math.h
@@ -0,0 +1,114 @@
+/* ===-- int_math.h - internal math inlines ---------------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===-----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines substitutes for the libm functions used in some of the
+ * compiler-rt implementations, defined in such a way that there is not a direct
+ * dependency on libm or math.h. Instead, we use the compiler builtin versions
+ * where available. This reduces our dependencies on the system SDK by foisting
+ * the responsibility onto the compiler.
+ *
+ * ===-----------------------------------------------------------------------===
+ */
+
+#ifndef INT_MATH_H
+#define INT_MATH_H
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <math.h>
+#include <stdlib.h>
+#include <ymath.h>
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define CRT_INFINITY INFINITY
+#else
+#define CRT_INFINITY __builtin_huge_valf()
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_isfinite(x) _finite((x))
+#define crt_isinf(x) !_finite((x))
+#define crt_isnan(x) _isnan((x))
+#else
+/* Define crt_isfinite in terms of the builtin if available, otherwise provide
+ * an alternate version in terms of our other functions. This supports some
+ * versions of GCC which didn't have __builtin_isfinite.
+ */
+#if __has_builtin(__builtin_isfinite)
+# define crt_isfinite(x) __builtin_isfinite((x))
+#elif defined(__GNUC__)
+# define crt_isfinite(x) \
+ __extension__(({ \
+ __typeof((x)) x_ = (x); \
+ !crt_isinf(x_) && !crt_isnan(x_); \
+ }))
+#else
+# error "Do not know how to check for infinity"
+#endif /* __has_builtin(__builtin_isfinite) */
+#define crt_isinf(x) __builtin_isinf((x))
+#define crt_isnan(x) __builtin_isnan((x))
+#endif /* _MSC_VER */
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_copysign(x, y) copysign((x), (y))
+#define crt_copysignf(x, y) copysignf((x), (y))
+#define crt_copysignl(x, y) copysignl((x), (y))
+#else
+#define crt_copysign(x, y) __builtin_copysign((x), (y))
+#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
+#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fabs(x) fabs((x))
+#define crt_fabsf(x) fabsf((x))
+#define crt_fabsl(x) fabs((x))
+#else
+#define crt_fabs(x) __builtin_fabs((x))
+#define crt_fabsf(x) __builtin_fabsf((x))
+#define crt_fabsl(x) __builtin_fabsl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fmax(x, y) __max((x), (y))
+#define crt_fmaxf(x, y) __max((x), (y))
+#define crt_fmaxl(x, y) __max((x), (y))
+#else
+#define crt_fmax(x, y) __builtin_fmax((x), (y))
+#define crt_fmaxf(x, y) __builtin_fmaxf((x), (y))
+#define crt_fmaxl(x, y) __builtin_fmaxl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_logb(x) logb((x))
+#define crt_logbf(x) logbf((x))
+#define crt_logbl(x) logbl((x))
+#else
+#define crt_logb(x) __builtin_logb((x))
+#define crt_logbf(x) __builtin_logbf((x))
+#define crt_logbl(x) __builtin_logbl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_scalbn(x, y) scalbn((x), (y))
+#define crt_scalbnf(x, y) scalbnf((x), (y))
+#define crt_scalbnl(x, y) scalbnl((x), (y))
+#else
+#define crt_scalbn(x, y) __builtin_scalbn((x), (y))
+#define crt_scalbnf(x, y) __builtin_scalbnf((x), (y))
+#define crt_scalbnl(x, y) __builtin_scalbnl((x), (y))
+#endif
+
+#endif /* INT_MATH_H */
diff --git a/lib/compiler-rt/builtins/int_types.h b/lib/compiler-rt/builtins/int_types.h
new file mode 100644
index 000000000..f53f343d3
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_types.h
@@ -0,0 +1,164 @@
+/* ===-- int_types.h - type definitions for compiler-rt -------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines various standard types, most importantly a number of unions
+ * used to access parts of larger types.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_TYPES_H
+#define INT_TYPES_H
+
+#include "int_endianness.h"
+
+/* si_int is defined in Linux sysroot's asm-generic/siginfo.h */
+#ifdef si_int
+#undef si_int
+#endif
+typedef int si_int;
+typedef unsigned su_int;
+
+typedef long long di_int;
+typedef unsigned long long du_int;
+
+typedef union
+{
+ di_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ si_int high;
+#else
+ si_int high;
+ su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} dwords;
+
+typedef union
+{
+ du_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ su_int high;
+#else
+ su_int high;
+ su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} udwords;
+
+#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64)) || defined(__riscv)
+#define CRT_HAS_128BIT
+#endif
+
+#ifdef CRT_HAS_128BIT
+typedef int ti_int __attribute__ ((mode (TI)));
+typedef unsigned tu_int __attribute__ ((mode (TI)));
+
+typedef union
+{
+ ti_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ di_int high;
+#else
+ di_int high;
+ du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} twords;
+
+typedef union
+{
+ tu_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ du_int high;
+#else
+ du_int high;
+ du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} utwords;
+
+static __inline ti_int make_ti(di_int h, di_int l) {
+ twords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+static __inline tu_int make_tu(du_int h, du_int l) {
+ utwords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+#endif /* CRT_HAS_128BIT */
+
+typedef union
+{
+ su_int u;
+ float f;
+} float_bits;
+
+typedef union
+{
+ udwords u;
+ double f;
+} double_bits;
+
+typedef struct
+{
+#if _YUGA_LITTLE_ENDIAN
+ udwords low;
+ udwords high;
+#else
+ udwords high;
+ udwords low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+} uqwords;
+
+typedef union
+{
+ uqwords u;
+ long double f;
+} long_double_bits;
+
+#if __STDC_VERSION__ >= 199901L
+typedef float _Complex Fcomplex;
+typedef double _Complex Dcomplex;
+typedef long double _Complex Lcomplex;
+
+#define COMPLEX_REAL(x) __real__(x)
+#define COMPLEX_IMAGINARY(x) __imag__(x)
+#else
+typedef struct { float real, imaginary; } Fcomplex;
+
+typedef struct { double real, imaginary; } Dcomplex;
+
+typedef struct { long double real, imaginary; } Lcomplex;
+
+#define COMPLEX_REAL(x) (x).real
+#define COMPLEX_IMAGINARY(x) (x).imaginary
+#endif
+#endif /* INT_TYPES_H */
+
diff --git a/lib/compiler-rt/builtins/udivmoddi4.c b/lib/compiler-rt/builtins/udivmoddi4.c
new file mode 100644
index 000000000..0c8b4ff46
--- /dev/null
+++ b/lib/compiler-rt/builtins/udivmoddi4.c
@@ -0,0 +1,231 @@
+/* ===-- udivmoddi4.c - Implement __udivmoddi4 -----------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __udivmoddi4 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Effects: if rem != 0, *rem = a % b
+ * Returns: a / b
+ */
+
+/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */
+
+COMPILER_RT_ABI du_int
+__udivmoddi4(du_int a, du_int b, du_int* rem)
+{
+ const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
+ const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT;
+ udwords n;
+ n.all = a;
+ udwords d;
+ d.all = b;
+ udwords q;
+ udwords r;
+ unsigned sr;
+ /* special cases, X is unknown, K != 0 */
+ if (n.s.high == 0)
+ {
+ if (d.s.high == 0)
+ {
+ /* 0 X
+ * ---
+ * 0 X
+ */
+ if (rem)
+ *rem = n.s.low % d.s.low;
+ return n.s.low / d.s.low;
+ }
+ /* 0 X
+ * ---
+ * K X
+ */
+ if (rem)
+ *rem = n.s.low;
+ return 0;
+ }
+ /* n.s.high != 0 */
+ if (d.s.low == 0)
+ {
+ if (d.s.high == 0)
+ {
+ /* K X
+ * ---
+ * 0 0
+ */
+ if (rem)
+ *rem = n.s.high % d.s.low;
+ return n.s.high / d.s.low;
+ }
+ /* d.s.high != 0 */
+ if (n.s.low == 0)
+ {
+ /* K 0
+ * ---
+ * K 0
+ */
+ if (rem)
+ {
+ r.s.high = n.s.high % d.s.high;
+ r.s.low = 0;
+ *rem = r.all;
+ }
+ return n.s.high / d.s.high;
+ }
+ /* K K
+ * ---
+ * K 0
+ */
+ if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */
+ {
+ if (rem)
+ {
+ r.s.low = n.s.low;
+ r.s.high = n.s.high & (d.s.high - 1);
+ *rem = r.all;
+ }
+ return n.s.high >> __builtin_ctz(d.s.high);
+ }
+ /* K K
+ * ---
+ * K 0
+ */
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ /* 0 <= sr <= n_uword_bits - 2 or sr large */
+ if (sr > n_uword_bits - 2)
+ {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ /* 1 <= sr <= n_uword_bits - 1 */
+ /* q.all = n.all << (n_udword_bits - sr); */
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ /* r.all = n.all >> sr; */
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ else /* d.s.low != 0 */
+ {
+ if (d.s.high == 0)
+ {
+ /* K X
+ * ---
+ * 0 K
+ */
+ if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */
+ {
+ if (rem)
+ *rem = n.s.low & (d.s.low - 1);
+ if (d.s.low == 1)
+ return n.all;
+ sr = __builtin_ctz(d.s.low);
+ q.s.high = n.s.high >> sr;
+ q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ return q.all;
+ }
+ /* K X
+ * ---
+ * 0 K
+ */
+ sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high);
+ /* 2 <= sr <= n_udword_bits - 1
+ * q.all = n.all << (n_udword_bits - sr);
+ * r.all = n.all >> sr;
+ */
+ if (sr == n_uword_bits)
+ {
+ q.s.low = 0;
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ }
+ else if (sr < n_uword_bits) // 2 <= sr <= n_uword_bits - 1
+ {
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ else // n_uword_bits + 1 <= sr <= n_udword_bits - 1
+ {
+ q.s.low = n.s.low << (n_udword_bits - sr);
+ q.s.high = (n.s.high << (n_udword_bits - sr)) |
+ (n.s.low >> (sr - n_uword_bits));
+ r.s.high = 0;
+ r.s.low = n.s.high >> (sr - n_uword_bits);
+ }
+ }
+ else
+ {
+ /* K X
+ * ---
+ * K K
+ */
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ /* 0 <= sr <= n_uword_bits - 1 or sr large */
+ if (sr > n_uword_bits - 1)
+ {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ /* 1 <= sr <= n_uword_bits */
+ /* q.all = n.all << (n_udword_bits - sr); */
+ q.s.low = 0;
+ if (sr == n_uword_bits)
+ {
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ }
+ else
+ {
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ }
+ }
+ /* Not a special case
+ * q and r are initialized with:
+ * q.all = n.all << (n_udword_bits - sr);
+ * r.all = n.all >> sr;
+ * 1 <= sr <= n_udword_bits - 1
+ */
+ su_int carry = 0;
+ for (; sr > 0; --sr)
+ {
+ /* r:q = ((r:q) << 1) | carry */
+ r.s.high = (r.s.high << 1) | (r.s.low >> (n_uword_bits - 1));
+ r.s.low = (r.s.low << 1) | (q.s.high >> (n_uword_bits - 1));
+ q.s.high = (q.s.high << 1) | (q.s.low >> (n_uword_bits - 1));
+ q.s.low = (q.s.low << 1) | carry;
+ /* carry = 0;
+ * if (r.all >= d.all)
+ * {
+ * r.all -= d.all;
+ * carry = 1;
+ * }
+ */
+ const di_int s = (di_int)(d.all - r.all - 1) >> (n_udword_bits - 1);
+ carry = s & 1;
+ r.all -= d.all & s;
+ }
+ q.all = (q.all << 1) | carry;
+ if (rem)
+ *rem = r.all;
+ return q.all;
+}
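
Note: the routine above is the generic shift-and-subtract worker behind the
AEABI 64-bit unsigned division helper on AArch32. A minimal sketch of its
contract, assuming the standard compiler-rt signature; the checks against the
native operators are purely illustrative:

	#include <assert.h>
	#include <stdint.h>

	/* compiler-rt declares: du_int __udivmoddi4(du_int, du_int, du_int *) */
	extern uint64_t __udivmoddi4(uint64_t n, uint64_t d, uint64_t *rem);

	static void check_udivmoddi4(uint64_t n, uint64_t d)
	{
		uint64_t r;
		uint64_t q = __udivmoddi4(n, d, &r);

		/* For any d != 0: q == n / d, r == n % d, n == q * d + r. */
		assert(q == n / d);
		assert(r == n % d);
		assert(n == q * d + r);
	}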
diff --git a/lib/compiler-rt/compiler-rt.mk b/lib/compiler-rt/compiler-rt.mk
new file mode 100644
index 000000000..d3e2d5ca5
--- /dev/null
+++ b/lib/compiler-rt/compiler-rt.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch32)
+COMPILER_RT_SRCS := $(addprefix lib/compiler-rt/builtins/, \
+ arm/aeabi_uldivmod.S \
+ ctzdi2.c \
+ udivmoddi4.c \
+)
+endif
diff --git a/lib/delay/delay.c b/lib/delay/delay.c
new file mode 100644
index 000000000..d88e500c6
--- /dev/null
+++ b/lib/delay/delay.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <tftf.h>
+
+void waitus(uint64_t us)
+{
+ uint64_t cntp_ct_val_base;
+ uint32_t cnt_frq;
+ uint64_t wait_cycles;
+
+ cnt_frq = read_cntfrq_el0();
+ cntp_ct_val_base = read_cntpct_el0();
+
+	/* Convert the wait time in microseconds into counter ticks */
+ wait_cycles = (us * cnt_frq) / 1000000;
+
+ while (read_cntpct_el0() - cntp_ct_val_base < wait_cycles)
+ ;
+}
+
+void waitms(uint64_t ms)
+{
+ while (ms > 0) {
+ waitus(1000);
+ ms--;
+ }
+}
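
Note: a hypothetical usage sketch (not part of this patch); the flag and the
millisecond-granularity timeout policy are illustrative:

	#include <stdint.h>
	#include <tftf.h>

	/* Poll a flag with a coarse timeout built on top of waitms(). */
	static int wait_for_flag(volatile int *flag, uint64_t timeout_ms)
	{
		while (timeout_ms-- > 0U) {
			if (*flag != 0)
				return 0;	/* condition met */
			waitms(1);		/* busy-wait on the system counter */
		}
		return -1;			/* timed out */
	}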
diff --git a/lib/events/events.c b/lib/events/events.c
new file mode 100644
index 000000000..42130d5e8
--- /dev/null
+++ b/lib/events/events.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <platform_def.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+void tftf_init_event(event_t *event)
+{
+ assert(event != NULL);
+ event->cnt = 0;
+ event->lock.lock = 0;
+}
+
+static void send_event_common(event_t *event, unsigned int inc)
+{
+ spin_lock(&event->lock);
+ event->cnt += inc;
+ spin_unlock(&event->lock);
+
+ sev();
+}
+
+void tftf_send_event(event_t *event)
+{
+ VERBOSE("Sending event %p\n", (void *) event);
+ send_event_common(event, 1);
+}
+
+void tftf_send_event_to_all(event_t *event)
+{
+ VERBOSE("Sending event %p to all CPUs\n", (void *) event);
+ send_event_common(event, PLATFORM_CORE_COUNT);
+}
+
+void tftf_send_event_to(event_t *event, unsigned int cpus_count)
+{
+ assert(cpus_count <= PLATFORM_CORE_COUNT);
+ VERBOSE("Sending event %p to %u CPUs\n", (void *) event, cpus_count);
+ send_event_common(event, cpus_count);
+}
+
+void tftf_wait_for_event(event_t *event)
+{
+ unsigned int event_received = 0;
+
+ VERBOSE("Waiting for event %p\n", (void *) event);
+ while (!event_received) {
+
+ dsbsy();
+ /* Wait for someone to send an event */
+ if (!event->cnt) {
+ wfe();
+ } else {
+ spin_lock(&event->lock);
+
+ /*
+ * Check that the event is still pending and that no
+ * one stole it from us while we were trying to
+ * acquire the lock.
+ */
+ if (event->cnt != 0) {
+ event_received = 1;
+ --event->cnt;
+ }
+ /*
+ * No memory barrier is needed here because spin_unlock()
+ * issues a Store-Release instruction, which guarantees
+ * that loads and stores appearing in program order
+ * before the Store-Release are observed before the
+ * Store-Release itself.
+ */
+ spin_unlock(&event->lock);
+ }
+ }
+
+ VERBOSE("Received event %p\n", (void *) event);
+}
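
Note: a sketch of the intended producer/consumer usage across two CPUs,
assuming the event is initialised before the consumer starts waiting; the
function names are illustrative:

	#include <events.h>

	static event_t data_ready;

	/* Producer, e.g. on the primary CPU. */
	static void producer(void)
	{
		tftf_init_event(&data_ready);	/* cnt = 0, lock released */
		/* ... publish the data ... */
		tftf_send_event(&data_ready);	/* cnt++, then SEV wakes waiters */
	}

	/* Consumer, e.g. running as a secondary CPU's entrypoint. */
	static void consumer(void)
	{
		tftf_wait_for_event(&data_ready); /* sleeps in WFE until cnt > 0 */
		/* ... consume the data ... */
	}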
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
new file mode 100644
index 000000000..a923df32b
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <amu_private.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+
+int amu_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ return (features & ID_PFR0_AMU_MASK) == 1;
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
new file mode 100644
index 000000000..72f09dc84
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group1_cnt_read_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 3] */
+ mov r1, r0
+ lsr r1, r1, #2
+ cmp r1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+ * in the table below.
+ */
+ adr r1, 1f
+ lsl r0, r0, #3 /* each ldcopr16/bx lr sequence is 8 bytes */
+ add r1, r1, r0
+ bx r1
+1:
+ ldcopr16 r0, r1, AMEVCNTR00 /* index 0 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR01 /* index 1 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR02 /* index 2 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR03 /* index 3 */
+ bx lr
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 15] */
+ mov r1, r0
+ lsr r1, r1, #4
+ cmp r1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+ * in the table below.
+ */
+ adr r1, 1f
+ lsl r0, r0, #3 /* each ldcopr16/bx lr sequence is 8 bytes */
+ add r1, r1, r0
+ bx r1
+
+1:
+	ldcopr16 r0, r1, AMEVCNTR10 /* index 0 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR11 /* index 1 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR12 /* index 2 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR13 /* index 3 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR14 /* index 4 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR15 /* index 5 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR16 /* index 6 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR17 /* index 7 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR18 /* index 8 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR19 /* index 9 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1A /* index 10 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1B /* index 11 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1C /* index 12 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1D /* index 13 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1E /* index 14 */
+	bx lr
+	ldcopr16 r0, r1, AMEVCNTR1F /* index 15 */
+	bx lr
+endfunc amu_group1_cnt_read_internal
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
new file mode 100644
index 000000000..00253b35d
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <amu_private.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+
+int amu_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
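
Note: a hedged sketch of how these accessors might be used to measure a
workload. Treating group 0 counter 0 as the processor cycle counter is an
assumption of the example:

	#include <amu.h>

	/* Returns how far group 0 counter 0 advances across the workload,
	 * or 0 if the AMU extension is not present. */
	static uint64_t amu_delta(void (*workload)(void))
	{
		uint64_t start;

		if (!amu_supported())
			return 0;

		start = amu_group0_cnt_read(0);
		workload();
		return amu_group0_cnt_read(0) - start;
	}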
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 000000000..862a71348
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group1_cnt_read_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #2
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR00_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR01_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR02_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR03_EL0 /* index 3 */
+ ret
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #4
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR10_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR11_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR12_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR13_EL0 /* index 3 */
+ ret
+ mrs x0, AMEVCNTR14_EL0 /* index 4 */
+ ret
+ mrs x0, AMEVCNTR15_EL0 /* index 5 */
+ ret
+ mrs x0, AMEVCNTR16_EL0 /* index 6 */
+ ret
+ mrs x0, AMEVCNTR17_EL0 /* index 7 */
+ ret
+ mrs x0, AMEVCNTR18_EL0 /* index 8 */
+ ret
+ mrs x0, AMEVCNTR19_EL0 /* index 9 */
+ ret
+ mrs x0, AMEVCNTR1A_EL0 /* index 10 */
+ ret
+ mrs x0, AMEVCNTR1B_EL0 /* index 11 */
+ ret
+ mrs x0, AMEVCNTR1C_EL0 /* index 12 */
+ ret
+ mrs x0, AMEVCNTR1D_EL0 /* index 13 */
+ ret
+ mrs x0, AMEVCNTR1E_EL0 /* index 14 */
+ ret
+ mrs x0, AMEVCNTR1F_EL0 /* index 15 */
+ ret
+endfunc amu_group1_cnt_read_internal
diff --git a/lib/irq/irq.c b/lib/irq/irq.c
new file mode 100644
index 000000000..5f2507fdb
--- /dev/null
+++ b/lib/irq/irq.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <string.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define IS_PLAT_SPI(irq_num) \
+ (((irq_num) >= MIN_SPI_ID) && \
+ ((irq_num) <= MIN_SPI_ID + PLAT_MAX_SPI_OFFSET_ID))
+
+/* One descriptor per SPI allowed by IS_PLAT_SPI(), i.e. offsets 0 to PLAT_MAX_SPI_OFFSET_ID */
+static spi_desc spi_desc_table[PLAT_MAX_SPI_OFFSET_ID + 1];
+static ppi_desc ppi_desc_table[PLATFORM_CORE_COUNT][
+ (MAX_PPI_ID + 1) - MIN_PPI_ID];
+static sgi_desc sgi_desc_table[PLATFORM_CORE_COUNT][MAX_SGI_ID + 1];
+static spurious_desc spurious_desc_handler;
+
+/*
+ * For a given SPI, the associated IRQ handler is common to all CPUs.
+ * Therefore, we need a lock to prevent simultaneous updates.
+ *
+ * We use a single lock for all SPIs. This makes it impossible to update
+ * different SPIs' handlers at the same time (even though that would be safe),
+ * but it saves memory. SPI handlers shouldn't be updated that often anyway,
+ * so this restriction is not a problem in practice.
+ */
+static spinlock_t spi_lock;
+
+static irq_handler_t *get_irq_handler(unsigned int irq_num)
+{
+ if (IS_PLAT_SPI(irq_num))
+ return &spi_desc_table[irq_num - MIN_SPI_ID].handler;
+
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int linear_id = platform_get_core_pos(mpid);
+
+ if (IS_PPI(irq_num))
+ return &ppi_desc_table[linear_id][irq_num - MIN_PPI_ID].handler;
+
+ if (IS_SGI(irq_num))
+ return &sgi_desc_table[linear_id][irq_num - MIN_SGI_ID].handler;
+
+ /*
+ * The only possibility is for it to be a spurious
+ * interrupt.
+ */
+ assert(irq_num == GIC_SPURIOUS_INTERRUPT);
+ return &spurious_desc_handler;
+}
+
+void tftf_send_sgi(unsigned int sgi_id, unsigned int core_pos)
+{
+ assert(IS_SGI(sgi_id));
+
+ /*
+ * Ensure that all memory accesses prior to sending the SGI are
+ * completed.
+ */
+ dsbish();
+
+ /*
+ * Don't send interrupts to CPUs that are powering down. That would be a
+ * violation of the PSCI CPU_OFF caller responsibilities. The PSCI
+	 * specification explicitly says:
+ * "Asynchronous wake-ups on a core that has been switched off through a
+ * PSCI CPU_OFF call results in an erroneous state. When this erroneous
+ * state is observed, it is IMPLEMENTATION DEFINED how the PSCI
+ * implementation reacts."
+ */
+ assert(tftf_is_core_pos_online(core_pos));
+ arm_gic_send_sgi(sgi_id, core_pos);
+}
+
+void tftf_irq_enable(unsigned int irq_num, uint8_t irq_priority)
+{
+ if (IS_PLAT_SPI(irq_num)) {
+ /*
+ * Instruct the GIC Distributor to forward the interrupt to
+ * the calling core
+ */
+ arm_gic_set_intr_target(irq_num, platform_get_core_pos(read_mpidr_el1()));
+ }
+
+ arm_gic_set_intr_priority(irq_num, irq_priority);
+ arm_gic_intr_enable(irq_num);
+
+ VERBOSE("Enabled IRQ #%u\n", irq_num);
+}
+
+void tftf_irq_disable(unsigned int irq_num)
+{
+ /* Disable the interrupt */
+ arm_gic_intr_disable(irq_num);
+
+ VERBOSE("Disabled IRQ #%u\n", irq_num);
+}
+
+#define HANDLER_VALID(handler, expect_handler) \
+ ((expect_handler) ? ((handler) != NULL) : ((handler) == NULL))
+
+static int tftf_irq_update_handler(unsigned int irq_num,
+ irq_handler_t irq_handler,
+ bool expect_handler)
+{
+ irq_handler_t *cur_handler;
+ int ret = -1;
+
+ cur_handler = get_irq_handler(irq_num);
+ if (IS_PLAT_SPI(irq_num))
+ spin_lock(&spi_lock);
+
+ /*
+ * Update the IRQ handler, if the current handler is in the expected
+ * state
+ */
+ assert(HANDLER_VALID(*cur_handler, expect_handler));
+ if (HANDLER_VALID(*cur_handler, expect_handler)) {
+ *cur_handler = irq_handler;
+ ret = 0;
+ }
+
+ if (IS_PLAT_SPI(irq_num))
+ spin_unlock(&spi_lock);
+
+ return ret;
+}
+
+int tftf_irq_register_handler(unsigned int irq_num, irq_handler_t irq_handler)
+{
+ int ret;
+
+ ret = tftf_irq_update_handler(irq_num, irq_handler, false);
+ if (ret == 0)
+ INFO("Registered IRQ handler %p for IRQ #%u\n",
+ (void *)(uintptr_t) irq_handler, irq_num);
+
+ return ret;
+}
+
+int tftf_irq_unregister_handler(unsigned int irq_num)
+{
+ int ret;
+
+ ret = tftf_irq_update_handler(irq_num, NULL, true);
+ if (ret == 0)
+ INFO("Unregistered IRQ handler for IRQ #%u\n", irq_num);
+
+ return ret;
+}
+
+int tftf_irq_handler_dispatcher(void)
+{
+ unsigned int raw_iar;
+ unsigned int irq_num;
+ sgi_data_t sgi_data;
+ irq_handler_t *handler;
+ void *irq_data = NULL;
+ int rc = 0;
+
+ /* Acknowledge the interrupt */
+ irq_num = arm_gic_intr_ack(&raw_iar);
+
+ handler = get_irq_handler(irq_num);
+ if (IS_PLAT_SPI(irq_num)) {
+ irq_data = &irq_num;
+ } else if (IS_PPI(irq_num)) {
+ irq_data = &irq_num;
+ } else if (IS_SGI(irq_num)) {
+ sgi_data.irq_id = irq_num;
+ irq_data = &sgi_data;
+ }
+
+ if (*handler != NULL)
+ rc = (*handler)(irq_data);
+
+ /* Mark the processing of the interrupt as complete */
+ if (irq_num != GIC_SPURIOUS_INTERRUPT)
+ arm_gic_end_of_intr(raw_iar);
+
+ return rc;
+}
+
+void tftf_irq_setup(void)
+{
+ memset(spi_desc_table, 0, sizeof(spi_desc_table));
+ memset(ppi_desc_table, 0, sizeof(ppi_desc_table));
+ memset(sgi_desc_table, 0, sizeof(sgi_desc_table));
+ memset(&spurious_desc_handler, 0, sizeof(spurious_desc_handler));
+ init_spinlock(&spi_lock);
+}
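
Note: a sketch of the expected register/enable/disable/unregister life cycle
using only the functions above; MY_SPI_ID and my_handler are illustrative:

	#include <irq.h>

	#define MY_SPI_ID	100	/* illustrative SPI number */

	static int my_handler(void *data)
	{
		/* For SPIs and PPIs, the payload is a pointer to the IRQ ID. */
		unsigned int irq_num = *(unsigned int *)data;

		(void)irq_num;
		return 0;
	}

	static void my_driver_setup(void)
	{
		tftf_irq_register_handler(MY_SPI_ID, my_handler);
		tftf_irq_enable(MY_SPI_ID, GIC_HIGHEST_NS_PRIORITY);

		/* ... tftf_irq_handler_dispatcher() runs my_handler here ... */

		tftf_irq_disable(MY_SPI_ID);
		tftf_irq_unregister_handler(MY_SPI_ID);
	}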
diff --git a/lib/locks/aarch32/spinlock.S b/lib/locks/aarch32/spinlock.S
new file mode 100644
index 000000000..b408914fc
--- /dev/null
+++ b/lib/locks/aarch32/spinlock.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl init_spinlock
+ .globl spin_lock
+ .globl spin_unlock
+
+func init_spinlock
+ mov r1, #0
+ str r1, [r0]
+ bx lr
+endfunc init_spinlock
+
+func spin_lock
+ mov r2, #1
+1:
+ ldrex r1, [r0]
+ cmp r1, #0
+ wfene
+ strexeq r1, r2, [r0]
+ cmpeq r1, #0
+ bne 1b
+ dmb
+ bx lr
+endfunc spin_lock
+
+
+func spin_unlock
+ mov r1, #0
+ stl r1, [r0]
+ bx lr
+endfunc spin_unlock
diff --git a/lib/locks/aarch64/spinlock.S b/lib/locks/aarch64/spinlock.S
new file mode 100644
index 000000000..7f6d0c6aa
--- /dev/null
+++ b/lib/locks/aarch64/spinlock.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl init_spinlock
+ .globl spin_lock
+ .globl spin_unlock
+
+func init_spinlock
+ str wzr, [x0]
+ ret
+endfunc init_spinlock
+
+func spin_lock
+ mov w2, #1
+ sevl
+l1: wfe
+l2: ldaxr w1, [x0]
+ cbnz w1, l1
+ stxr w1, w2, [x0]
+ cbnz w1, l2
+ ret
+endfunc spin_lock
+
+
+func spin_unlock
+ stlr wzr, [x0]
+ ret
+endfunc spin_unlock
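
Note: the C-level view of these primitives, matching their use elsewhere in
this patch (e.g. lib/irq/irq.c); the protected counter is illustrative:

	#include <spinlock.h>

	static spinlock_t counter_lock;	/* init_spinlock() once before use */
	static unsigned int counter;

	static void counter_inc(void)
	{
		spin_lock(&counter_lock);	/* LDAXR/STXR acquire loop */
		counter++;
		spin_unlock(&counter_lock);	/* single STLR release */
	}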
diff --git a/lib/power_management/hotplug/hotplug.c b/lib/power_management/hotplug/hotplug.c
new file mode 100644
index 000000000..7c3a988f8
--- /dev/null
+++ b/lib/power_management/hotplug/hotplug.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <cdefs.h> /* For __dead2 */
+#include <console.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <stdint.h>
+#include <tftf.h>
+
+/*
+ * Affinity info map of CPUs as seen by TFTF
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
+ * as ON.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
+ * CPU i as ON_PENDING.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
+ * as OFF.
+ */
+static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];
+static int cpus_status_init_done;
+
+/*
+ * Reference count keeping track of the number of CPUs participating in
+ * a test.
+ */
+static volatile unsigned int ref_cnt;
+
+/* Lock to prevent concurrent accesses to the reference count */
+static spinlock_t ref_cnt_lock;
+
+/* Per-cpu test entrypoint */
+volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];
+
+u_register_t tftf_primary_core = INVALID_MPID;
+
+unsigned int tftf_inc_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt < PLATFORM_CORE_COUNT);
+ cnt = ++ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Entering the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
+unsigned int tftf_dec_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt != 0);
+ cnt = --ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Exiting the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
+unsigned int tftf_get_ref_cnt(void)
+{
+ return ref_cnt;
+}
+
+void tftf_init_cpus_status_map(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+	/* Ensure that only the primary CPU does the initialisation */
+ assert((mpid & MPID_MASK) == tftf_primary_core);
+
+	/* Ensure the initialisation is done only once */
+ assert(!cpus_status_init_done);
+
+ cpus_status_init_done = 1;
+
+ /*
+ * cpus_status_map already initialised to zero as part of BSS init,
+ * just set the primary to ON state
+ */
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
+}
+
+void tftf_set_cpu_online(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /*
+	 * Wait here until `tftf_try_cpu_on` has had a chance to update the
+	 * CPU state.
+ */
+ while (cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_OFF)
+ ;
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON_PENDING);
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+}
+
+void tftf_set_cpu_offline(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+
+ assert(tftf_is_cpu_online(mpid));
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+}
+
+unsigned int tftf_is_cpu_online(unsigned int mpid)
+{
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
+unsigned int tftf_is_core_pos_online(unsigned int core_pos)
+{
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
+int32_t tftf_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int32_t ret;
+ tftf_affinity_info_t cpu_state;
+ unsigned int core_pos = platform_get_core_pos(target_cpu);
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+ cpu_state = cpus_status_map[core_pos].state;
+
+ if (cpu_state == TFTF_AFFINITY_STATE_ON) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ return PSCI_E_ALREADY_ON;
+ }
+
+ if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ return PSCI_E_SUCCESS;
+ }
+
+ assert(cpu_state == TFTF_AFFINITY_STATE_OFF);
+
+ do {
+ ret = tftf_psci_cpu_on(target_cpu,
+ (uintptr_t) tftf_hotplug_entry,
+ context_id);
+
+		/* Check whether multiple CPU_ON calls were made for the same CPU */
+ assert(ret != PSCI_E_ON_PENDING);
+ } while (ret == PSCI_E_ALREADY_ON);
+
+ if (ret == PSCI_E_SUCCESS) {
+ /*
+ * Populate the test entry point for this core.
+ * This is the address where the core will jump to once the framework
+ * has finished initialising it.
+ */
+ test_entrypoint[core_pos] = (test_function_t) entrypoint;
+
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ } else {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ ERROR("Failed to boot CPU 0x%llx (%d)\n",
+ (unsigned long long)target_cpu, ret);
+ }
+
+ return ret;
+}
+
+int32_t tftf_try_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int32_t ret;
+ unsigned int core_pos = platform_get_core_pos(target_cpu);
+
+ ret = tftf_psci_cpu_on(target_cpu,
+ (uintptr_t) tftf_hotplug_entry,
+ context_id);
+
+ if (ret == PSCI_E_SUCCESS) {
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(cpus_status_map[core_pos].state ==
+ TFTF_AFFINITY_STATE_OFF);
+ cpus_status_map[core_pos].state =
+ TFTF_AFFINITY_STATE_ON_PENDING;
+
+ spin_unlock(&cpus_status_map[core_pos].lock);
+
+ /*
+ * Populate the test entry point for this core.
+ * This is the address where the core will jump to once the
+ * framework has finished initialising it.
+ */
+ test_entrypoint[core_pos] = (test_function_t) entrypoint;
+ }
+
+ return ret;
+}
+
+/*
+ * Prepare the core to power off. Any driver which needs to perform specific
+ * tasks before powering off a CPU, e.g. migrating interrupts to another
+ * core, can implement a function and call it from here.
+ */
+static void tftf_prepare_cpu_off(void)
+{
+ /*
+	 * Do the bare minimum to turn off this CPU, i.e. mask interrupts
+	 * and disable the GIC CPU interface.
+ */
+ disable_irq();
+ arm_gic_disable_interrupts_local();
+}
+
+/*
+ * Revert the changes made during tftf_prepare_cpu_off()
+ */
+static void tftf_revert_cpu_off(void)
+{
+ arm_gic_enable_interrupts_local();
+ enable_irq();
+}
+
+int32_t tftf_cpu_off(void)
+{
+ int32_t ret;
+
+ tftf_prepare_cpu_off();
+ tftf_set_cpu_offline();
+
+ INFO("Powering off\n");
+
+ /* Flush console before the last CPU is powered off. */
+ if (tftf_get_ref_cnt() == 0)
+ console_flush();
+
+ /* Power off the CPU */
+ ret = tftf_psci_cpu_off();
+
+ ERROR("Failed to power off (%d)\n", ret);
+
+ /*
+ * PSCI CPU_OFF call does not return when successful.
+ * Otherwise, it should return the PSCI error code 'DENIED'.
+ */
+ assert(ret == PSCI_E_DENIED);
+
+ /*
+ * The CPU failed to power down since we returned from
+ * tftf_psci_cpu_off(). So we need to adjust the framework's view of
+ * the core by marking it back online.
+ */
+ tftf_set_cpu_online();
+ tftf_revert_cpu_off();
+
+ return ret;
+}
+
+/*
+ * C entry point for a CPU that has just been powered up.
+ */
+void __dead2 tftf_warm_boot_main(void)
+{
+ /* Initialise the CPU */
+ tftf_arch_setup();
+ arm_gic_setup_local();
+
+ /* Enable the SGI used by the timer management framework */
+ tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);
+
+ enable_irq();
+
+ INFO("Booting\n");
+
+ tftf_set_cpu_online();
+
+ /* Enter the test session */
+ run_tests();
+
+ /* Should never reach this point */
+ bug_unreachable();
+}
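
Note: a hedged sketch of powering up a secondary CPU from a test using the
functions above; secondary_entry and the target MPID are illustrative, and
test_result_t/TEST_RESULT_SUCCESS are assumed from the framework headers:

	#include <power_management.h>
	#include <psci.h>
	#include <tftf_lib.h>

	static test_result_t secondary_entry(void)
	{
		/* Runs after tftf_warm_boot_main() marks this CPU online. */
		return TEST_RESULT_SUCCESS;
	}

	static int power_up(u_register_t target_mpid)
	{
		int32_t ret = tftf_cpu_on(target_mpid,
					  (uintptr_t)secondary_entry,
					  0 /* context_id, unused here */);

		return (ret == PSCI_E_SUCCESS) ? 0 : -1;
	}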
diff --git a/lib/power_management/suspend/aarch32/asm_tftf_suspend.S b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
new file mode 100644
index 000000000..cc59e7dbe
--- /dev/null
+++ b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
+/*
+ * Saves the CPU state for entering suspend. This saves the callee-saved
+ * registers on the stack and allocates space on the stack to save the
+ * CPU-specific registers needed when coming out of suspend.
+ *
+ * r0 contains a pointer to tftf_suspend_context structure.
+ */
+func __tftf_suspend
+ push {r4 - r12, lr}
+ mov r2, sp
+ sub sp, sp, #SUSPEND_CTX_SZ
+ mov r1, sp
+ /*
+ * r1 now points to struct tftf_suspend_ctx allocated on the stack
+ */
+ str r2, [r1, #SUSPEND_CTX_SP_OFFSET]
+ bl tftf_enter_suspend
+
+ /*
+ * If execution reaches this point, the suspend call was either
+ * a suspend to standby call or an invalid suspend call.
+ * In case of suspend to powerdown, execution will instead resume in
+ * __tftf_cpu_resume_ep().
+ */
+ add sp, sp, #SUSPEND_CTX_SZ
+ pop {r4 - r12, lr}
+ bx lr
+endfunc __tftf_suspend
+
+func __tftf_save_arch_context
+ ldcopr r1, HMAIR0
+ ldcopr r2, HCR
+ stm r0!, {r1, r2}
+ ldcopr16 r1, r2, HTTBR_64
+ stm r0!, {r1, r2}
+ ldcopr r1, HTCR
+ ldcopr r2, HVBAR
+ ldcopr r3, HSCTLR
+ stm r0, {r1, r2, r3}
+ bx lr
+endfunc __tftf_save_arch_context
+
+/*
+ * Restore CPU register context
+ * r0 -- Should contain the context pointer
+ */
+func __tftf_cpu_resume_ep
+ /* Invalidate local tlb entries before turning on MMU */
+ stcopr r0, TLBIALLH
+ mov r4, r0
+ ldm r0!, {r1, r2}
+ stcopr r1, HMAIR0
+ stcopr r2, HCR
+ ldm r0!, {r1, r2}
+ stcopr16 r1, r2, HTTBR_64
+ ldm r0, {r1, r2, r3}
+ stcopr r1, HTCR
+ stcopr r2, HVBAR
+
+ /*
+ * TLB invalidations need to be completed before enabling MMU
+ */
+ dsb nsh
+ stcopr r3, HSCTLR
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+
+ mov r0, r4
+ ldr r2, [r0, #SUSPEND_CTX_SP_OFFSET]
+ mov sp, r2
+ ldr r1, [r0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
+ cmp r1, #0
+ beq skip_sys_restore
+ bl tftf_restore_system_ctx
+skip_sys_restore:
+ pop {r4 - r12, lr}
+ mov r0, #PSCI_E_SUCCESS
+ bx lr
+endfunc __tftf_cpu_resume_ep
diff --git a/lib/power_management/suspend/aarch64/asm_tftf_suspend.S b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
new file mode 100644
index 000000000..692baded6
--- /dev/null
+++ b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
+/*
+ * Saves the CPU state for entering suspend. This saves the callee-saved
+ * registers on the stack and allocates space on the stack to save the
+ * CPU-specific registers needed when coming out of suspend.
+ *
+ * x0 contains a pointer to tftf_suspend_context structure.
+ */
+func __tftf_suspend
+ stp x29, x30, [sp, #-96]!
+ stp x19, x20, [sp, #16]
+ stp x21, x22, [sp, #32]
+ stp x23, x24, [sp, #48]
+ stp x25, x26, [sp, #64]
+ stp x27, x28, [sp, #80]
+ mov x2, sp
+ sub sp, sp, #SUSPEND_CTX_SZ
+ mov x1, sp
+ /*
+ * x1 now points to struct tftf_suspend_ctx allocated on the stack
+ */
+ str x2, [x1, #SUSPEND_CTX_SP_OFFSET]
+ bl tftf_enter_suspend
+
+ /*
+ * If execution reaches this point, the suspend call was either
+ * a suspend to standby call or an invalid suspend call.
+ * In case of suspend to powerdown, execution will instead resume in
+ * __tftf_cpu_resume_ep().
+ */
+ add sp, sp, #SUSPEND_CTX_SZ
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, x30, [sp], #96
+ ret
+endfunc __tftf_suspend
+
+func __tftf_save_arch_context
+ JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
+1: mrs x1, mair_el1
+ mrs x2, cpacr_el1
+ mrs x3, ttbr0_el1
+ mrs x4, tcr_el1
+ mrs x5, vbar_el1
+ mrs x6, sctlr_el1
+ stp x1, x2, [x0]
+ stp x3, x4, [x0, #16]
+ stp x5, x6, [x0, #32]
+ ret
+
+2: mrs x1, mair_el2
+ mrs x2, hcr_el2
+ mrs x3, ttbr0_el2
+ mrs x4, tcr_el2
+ mrs x5, vbar_el2
+ mrs x6, sctlr_el2
+ stp x1, x2, [x0]
+ stp x3, x4, [x0, #16]
+ stp x5, x6, [x0, #32]
+ ret
+endfunc __tftf_save_arch_context
+
+/*
+ * Restore CPU register context
+ * X0 -- Should contain the context pointer
+ */
+func __tftf_cpu_resume_ep
+ JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
+1: /* Invalidate local tlb entries before turning on MMU */
+ tlbi vmalle1
+ ldp x1, x2, [x0]
+ ldp x3, x4, [x0, #16]
+ ldp x5, x6, [x0, #32]
+ msr mair_el1, x1
+ msr cpacr_el1, x2
+ msr ttbr0_el1, x3
+ msr tcr_el1, x4
+ msr vbar_el1, x5
+ /*
+ * TLB invalidations need to be completed before enabling MMU
+ */
+ dsb nsh
+ msr sctlr_el1, x6
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+ b restore_callee_regs
+
+ /* Invalidate local tlb entries before turning on MMU */
+2: tlbi alle2
+ ldp x1, x2, [x0]
+ ldp x3, x4, [x0, #16]
+ ldp x5, x6, [x0, #32]
+ msr mair_el2, x1
+ msr hcr_el2, x2
+ msr ttbr0_el2, x3
+ msr tcr_el2, x4
+ msr vbar_el2, x5
+ /*
+ * TLB invalidations need to be completed before enabling MMU
+ */
+ dsb nsh
+ msr sctlr_el2, x6
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+
+restore_callee_regs:
+ ldr x2, [x0, #SUSPEND_CTX_SP_OFFSET]
+ mov sp, x2
+ ldr w1, [x0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
+ cbz w1, skip_sys_restore
+ bl tftf_restore_system_ctx
+skip_sys_restore:
+ ldp x19, x20, [sp, #16] /* Restore the callee saved registers */
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, x30, [sp], #96
+ mov x0, PSCI_E_SUCCESS
+ ret
+endfunc __tftf_cpu_resume_ep
+
+dead:
+ b .
diff --git a/lib/power_management/suspend/suspend_private.h b/lib/power_management/suspend/suspend_private.h
new file mode 100644
index 000000000..dfc2e930e
--- /dev/null
+++ b/lib/power_management/suspend/suspend_private.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SUSPEND_PRIV_H__
+#define __SUSPEND_PRIV_H__
+
+#define SUSPEND_CTX_SZ 64
+#define SUSPEND_CTX_SP_OFFSET 48
+#define SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET 56
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <power_management.h>
+#include <stdint.h>
+#include <string.h>
+#include <types.h>
+
+#define NR_CTX_REGS 6
+
+/*
+ * struct tftf_suspend_ctx represents the architecture context to
+ * be saved and restored while entering suspend and coming out.
+ * It must be 16-byte aligned since it is allocated on the stack, which must be
+ * 16-byte aligned on ARMv8 (AArch64). Even though AArch32 does not impose
+ * this alignment requirement, we keep the same alignment and register width
+ * so that the same structure can be reused for AArch32.
+ */
+typedef struct tftf_suspend_context {
+ uint64_t arch_ctx_regs[NR_CTX_REGS];
+ uint64_t stack_pointer;
+ /*
+	 * Whether the system context is saved and needs to be restored.
+ * Note that the system context itself is not saved in this structure.
+ */
+ unsigned int save_system_context;
+} __aligned(16) tftf_suspend_ctx_t;
+
+/*
+ * Saves the callee-saved registers on the stack,
+ * allocates space on the stack for the CPU context registers and
+ * enters suspend by calling tftf_enter_suspend().
+ * power state: PSCI power state to be sent via SMC
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ */
+unsigned int __tftf_suspend(const suspend_info_t *power_state);
+
+/*
+ * Saves the architecture context of CPU in the memory
+ * tftf_suspend_context: Pointer to the location for saving the context
+ */
+void __tftf_save_arch_context(struct tftf_suspend_context *ctx);
+
+/*
+ * Calls __tftf_save_arch_context() to save the architecture context of the
+ * CPU in the memory pointed to by ctx.
+ * Enters suspend by calling the SMC
+ * power state: PSCI power state to be sent via SMC
+ * ctx: Pointer to the location where suspend context can be stored
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ */
+int32_t tftf_enter_suspend(const suspend_info_t *power_state,
+ tftf_suspend_ctx_t *ctx);
+
+/*
+ * Invokes the appropriate driver functions in the TFTF framework
+ * to save their context prior to a system suspend.
+ */
+void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx);
+
+/*
+ * Invokes the appropriate driver functions in the TFTF framework
+ * to restore their context on wake-up from system suspend.
+ */
+void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx);
+
+/*
+ * Restores the CPU architecture context and callee-saved registers from the
+ * location pointed to by X0 (the context ID).
+ * Returns: PSCI_E_SUCCESS
+ */
+unsigned int __tftf_cpu_resume_ep(void);
+
+/* Assembler asserts to verify #defines of offsets match as seen by compiler */
+CASSERT(SUSPEND_CTX_SZ == sizeof(tftf_suspend_ctx_t),
+ assert_suspend_context_size_mismatch);
+CASSERT(SUSPEND_CTX_SP_OFFSET == __builtin_offsetof(tftf_suspend_ctx_t, stack_pointer),
+ assert_stack_pointer_location_mismatch_in_suspend_ctx);
+CASSERT(SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET ==
+ __builtin_offsetof(tftf_suspend_ctx_t, save_system_context),
+ assert_save_sys_ctx_mismatch_in_suspend_ctx);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SUSPEND_PRIV_H__ */
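
Note: for reference, the constants at the top of this header follow directly
from the structure layout; a sketch of the arithmetic, assuming 8-byte slots
for the uint64_t members on both builds:

	/*
	 * arch_ctx_regs[NR_CTX_REGS]  offsets  0..47  (6 x 8 bytes)
	 * stack_pointer               offset  48  -> SUSPEND_CTX_SP_OFFSET
	 * save_system_context         offset  56  -> SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET
	 * padding up to __aligned(16)             -> sizeof == 64 == SUSPEND_CTX_SZ
	 *
	 * The CASSERTs turn any drift between these #defines and the layout
	 * actually chosen by the compiler into a build-time error.
	 */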
diff --git a/lib/power_management/suspend/tftf_suspend.c b/lib/power_management/suspend/tftf_suspend.c
new file mode 100644
index 000000000..75c2ade06
--- /dev/null
+++ b/lib/power_management/suspend/tftf_suspend.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdint.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include "suspend_private.h"
+
+int32_t tftf_enter_suspend(const suspend_info_t *info,
+ tftf_suspend_ctx_t *ctx)
+{
+ smc_args cpu_suspend_args = {
+ info->psci_api,
+ info->power_state,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_args system_suspend_args = {
+ info->psci_api,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_ret_values rc;
+
+ if (info->save_system_context) {
+ ctx->save_system_context = 1;
+ tftf_save_system_ctx(ctx);
+ } else
+ ctx->save_system_context = 0;
+
+ /*
+ * Save the CPU context. It will be restored in resume path in
+ * __tftf_cpu_resume_ep().
+ */
+ __tftf_save_arch_context(ctx);
+
+ /*
+	 * Flush the context so that it can be retrieved with the MMU off
+ */
+ flush_dcache_range((u_register_t)ctx, sizeof(*ctx));
+
+ if (info->psci_api == SMC_PSCI_CPU_SUSPEND)
+ rc = tftf_smc(&cpu_suspend_args);
+ else
+ rc = tftf_smc(&system_suspend_args);
+
+ /*
+	 * If execution reaches this point, the above SMC call was an invalid
+ * call or a suspend to standby call. In both cases the CPU does not
+ * power down so there is no need to restore the context.
+ */
+ return rc.ret0;
+}
+
+void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(ctx->save_system_context);
+
+ /*
+ * TODO: Check if there is a need for separate platform
+ * API for resume.
+ */
+
+ tftf_early_platform_setup();
+
+ INFO("Restoring system context\n");
+
+ /* restore the global GIC context */
+ arm_gic_restore_context_global();
+ tftf_timer_gic_state_restore();
+}
+
+void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(ctx->save_system_context);
+
+ /* Nothing to do here currently */
+ INFO("Saving system context\n");
+
+ /* Save the global GIC context */
+ arm_gic_save_context_global();
+}
+
+int tftf_suspend(const suspend_info_t *info)
+{
+ int32_t rc;
+ uint64_t flags;
+
+ flags = read_daif();
+
+ disable_irq();
+
+ INFO("Going into suspend state\n");
+
+ /* Save the local GIC context */
+ arm_gic_save_context_local();
+
+ rc = __tftf_suspend(info);
+
+ /* Restore the local GIC context */
+ arm_gic_restore_context_local();
+
+ /*
+ * DAIF flags should be restored last because it could be an issue
+ * to unmask exceptions before that point, e.g. if GIC must be
+ * reconfigured upon resume from suspend.
+ */
+ write_daif(flags);
+
+ INFO("Resumed from suspend state\n");
+
+ return rc;
+}
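
Note: a hedged sketch of entering a standby state through this wrapper. The
suspend_info_t field names are those dereferenced in tftf_enter_suspend()
above; the power-state value would normally come from tftf_make_psci_pstate():

	#include <power_management.h>
	#include <psci.h>

	static int enter_standby(uint32_t power_state)
	{
		suspend_info_t info = {
			.psci_api = SMC_PSCI_CPU_SUSPEND,
			.power_state = power_state,
			.save_system_context = 0,	/* no GIC global save */
		};

		/* Returns once the CPU wakes up, e.g. PSCI_E_SUCCESS. */
		return tftf_suspend(&info);
	}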
diff --git a/lib/psci/psci.c b/lib/psci/psci.c
new file mode 100644
index 000000000..520c72435
--- /dev/null
+++ b/lib/psci/psci.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+static unsigned int pstate_format_detected;
+static unsigned int pstate_format;
+static unsigned int is_state_id_null;
+
+const psci_function_t psci_functions[PSCI_NUM_CALLS] = {
+ DEFINE_PSCI_FUNC(PSCI_FEATURES, true),
+ DEFINE_PSCI_FUNC(PSCI_VERSION, true),
+ DEFINE_PSCI_FUNC(PSCI_CPU_SUSPEND_AARCH32, true),
+ DEFINE_PSCI_FUNC(PSCI_CPU_SUSPEND_AARCH64, true),
+ DEFINE_PSCI_FUNC(PSCI_CPU_OFF, true),
+ DEFINE_PSCI_FUNC(PSCI_CPU_ON_AARCH32, true),
+ DEFINE_PSCI_FUNC(PSCI_CPU_ON_AARCH64, true),
+ DEFINE_PSCI_FUNC(PSCI_AFFINITY_INFO_AARCH32, true),
+ DEFINE_PSCI_FUNC(PSCI_AFFINITY_INFO_AARCH64, true),
+ DEFINE_PSCI_FUNC(PSCI_SYSTEM_OFF, true),
+ DEFINE_PSCI_FUNC(PSCI_SYSTEM_RESET, true),
+ DEFINE_PSCI_FUNC(PSCI_MIG_INFO_TYPE, false),
+ DEFINE_PSCI_FUNC(PSCI_MIG_INFO_UP_CPU_AARCH32, false),
+ DEFINE_PSCI_FUNC(PSCI_MIG_INFO_UP_CPU_AARCH64, false),
+ DEFINE_PSCI_FUNC(PSCI_MIG_AARCH32, false),
+ DEFINE_PSCI_FUNC(PSCI_MIG_AARCH64, false),
+ DEFINE_PSCI_FUNC(PSCI_CPU_FREEZE, false),
+ DEFINE_PSCI_FUNC(PSCI_CPU_DEFAULT_SUSPEND32, false),
+ DEFINE_PSCI_FUNC(PSCI_CPU_DEFAULT_SUSPEND64, false),
+ DEFINE_PSCI_FUNC(PSCI_CPU_HW_STATE32, false),
+ DEFINE_PSCI_FUNC(PSCI_CPU_HW_STATE64, false),
+ DEFINE_PSCI_FUNC(PSCI_SYSTEM_SUSPEND32, false),
+ DEFINE_PSCI_FUNC(PSCI_SYSTEM_SUSPEND64, false),
+ DEFINE_PSCI_FUNC(PSCI_SET_SUSPEND_MODE, false),
+ DEFINE_PSCI_FUNC(PSCI_STAT_RESIDENCY32, false),
+ DEFINE_PSCI_FUNC(PSCI_STAT_RESIDENCY64, false),
+ DEFINE_PSCI_FUNC(PSCI_STAT_COUNT32, false),
+ DEFINE_PSCI_FUNC(PSCI_STAT_COUNT64, false),
+ DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT, false),
+ DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT_CHECK_RANGE32, false),
+ DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT_CHECK_RANGE64, false),
+ DEFINE_PSCI_FUNC(PSCI_RESET2_AARCH32, false),
+ DEFINE_PSCI_FUNC(PSCI_RESET2_AARCH64, false),
+};
+
+int32_t tftf_psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entry_point_address,
+ u_register_t context_id)
+{
+ smc_args args = {
+ SMC_PSCI_CPU_ON,
+ target_cpu,
+ entry_point_address,
+ context_id
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_cpu_off(void)
+{
+ smc_args args = { SMC_PSCI_CPU_OFF };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+
+u_register_t tftf_psci_stat_residency(u_register_t target_cpu,
+ uint32_t power_state)
+{
+ smc_args args = {
+ SMC_PSCI_STAT_RESIDENCY,
+ target_cpu,
+ power_state,
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+u_register_t tftf_psci_stat_count(u_register_t target_cpu,
+ uint32_t power_state)
+{
+ smc_args args = {
+ SMC_PSCI_STAT_COUNT,
+ target_cpu,
+ power_state,
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level)
+{
+ smc_ret_values ret_vals;
+
+ smc_args args = {
+ SMC_PSCI_AFFINITY_INFO,
+ target_affinity,
+ lowest_affinity_level
+ };
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_node_hw_state(u_register_t target_cpu, uint32_t power_level)
+{
+ smc_args args = {
+ SMC_PSCI_CPU_HW_STATE,
+ target_cpu,
+ power_level
+ };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int32_t tftf_get_psci_feature_info(uint32_t psci_func_id)
+{
+ smc_args args = {
+ SMC_PSCI_FEATURES,
+ psci_func_id
+ };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int tftf_psci_make_composite_state_id(uint32_t affinity_level,
+ uint32_t state_type, uint32_t *state_id)
+{
+ unsigned int found_entry, i;
+ int ret = PSCI_E_SUCCESS;
+ const plat_state_prop_t *state_prop;
+
+ assert(state_id);
+
+ *state_id = 0;
+ for (i = 0; i <= affinity_level; i++) {
+ state_prop = plat_get_state_prop(i);
+ if (!state_prop) {
+ *state_id |= psci_make_local_state_id(i,
+ PLAT_PSCI_DUMMY_STATE_ID);
+ ret = PSCI_E_INVALID_PARAMS;
+ continue;
+ }
+ found_entry = 0;
+
+ while (state_prop->state_ID) {
+ if (state_type == state_prop->is_pwrdown) {
+ *state_id |= psci_make_local_state_id(i,
+ state_prop->state_ID);
+ found_entry = 1;
+ break;
+ }
+ state_prop++;
+ }
+ if (!found_entry) {
+ *state_id |= psci_make_local_state_id(i,
+ PLAT_PSCI_DUMMY_STATE_ID);
+ ret = PSCI_E_INVALID_PARAMS;
+ }
+ }
+
+ return ret;
+}
+
+static unsigned int tftf_psci_get_pstate_format(void)
+{
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_CPU_SUSPEND);
+
+ /*
+	 * If an error is returned, it probably means that the PSCI version
+	 * is less than 1.0, in which case only the original format is
+	 * supported. Alternatively, the version is 1.0 or higher but the
+	 * mandatory PSCI_FEATURES API is not implemented; in that case too,
+	 * only the original format can be assumed.
+ */
+ if (ret == PSCI_E_NOT_SUPPORTED)
+ return CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+
+ /* Treat the invalid return value as PSCI FEATURES not supported */
+	/* Treat an invalid return value as PSCI FEATURES not being supported */
+ return CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+
+ return (ret >> CPU_SUSPEND_FEAT_PSTATE_FORMAT_SHIFT) & 0x1;
+}
+
+/* Make the power state in the original format */
+uint32_t tftf_make_psci_pstate(uint32_t affinity_level,
+ uint32_t state_type,
+ uint32_t state_id)
+{
+ uint32_t power_state;
+
+ assert(psci_state_type_valid(state_type));
+
+ assert(pstate_format_detected);
+
+ if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_EXTENDED) {
+ assert(psci_state_id_ext_valid(state_id));
+ power_state = (state_type << PSTATE_TYPE_SHIFT_EXT)
+ | (state_id << PSTATE_ID_SHIFT_EXT);
+ } else {
+ assert(psci_affinity_level_valid(affinity_level));
+ assert(psci_state_id_valid(state_id));
+ power_state = (affinity_level << PSTATE_AFF_LVL_SHIFT)
+ | (state_type << PSTATE_TYPE_SHIFT);
+ if (!is_state_id_null)
+ power_state |= (state_id << PSTATE_ID_SHIFT);
+ }
+
+ return power_state;
+}
+
+
+void tftf_detect_psci_pstate_format(void)
+{
+ uint32_t power_state;
+ unsigned int ret;
+
+ pstate_format = tftf_psci_get_pstate_format();
+
+ /*
+ * If the power state format is extended format, then the state-ID
+ * must use recommended encoding.
+ */
+ if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_EXTENDED) {
+ pstate_format_detected = 1;
+ INFO("Extended PSCI power state format detected\n");
+ return;
+ }
+
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ /*
+ * Mask IRQ to prevent the interrupt handler being invoked
+ * and clearing the interrupt. A pending interrupt will cause this
+ * CPU to wake-up from suspend.
+ */
+ disable_irq();
+
+ /* Configure an SGI to wake-up from suspend */
+ tftf_send_sgi(IRQ_NS_SGI_0,
+ platform_get_core_pos(read_mpidr_el1() & MPID_MASK));
+
+ /*
+	 * Try to detect whether the platform uses NULL State-ID encoding by
+	 * issuing a PSCI CPU_SUSPEND call with the NULL State-ID encoding. If
+	 * the call succeeds, the platform uses NULL State-ID encoding;
+	 * otherwise it uses the recommended State-ID encoding.
+ */
+ power_state = (PSTATE_AFF_LVL_0 << PSTATE_AFF_LVL_SHIFT)
+ | (PSTATE_TYPE_STANDBY << PSTATE_TYPE_SHIFT);
+
+ ret = tftf_cpu_suspend(power_state);
+
+	/* Unmask the IRQ to let the interrupt handler execute */
+ enable_irq();
+ isb();
+
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ /*
+	 * The suspend call with the NULL State-ID returned SUCCESS, so the
+	 * platform uses NULL State-IDs in its power state format.
+ */
+ if (ret == PSCI_E_SUCCESS) {
+ is_state_id_null = 1;
+ INFO("Original PSCI power state format with NULL State-ID detected\n");
+ } else
+ INFO("Original PSCI power state format detected\n");
+
+ pstate_format_detected = 1;
+}
+
+unsigned int tftf_is_psci_state_id_null(void)
+{
+ assert(pstate_format_detected);
+
+ if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL)
+ return is_state_id_null;
+ else
+ return 0; /* Extended state ID does not support null format */
+}
+
+unsigned int tftf_is_psci_pstate_format_original(void)
+{
+ assert(pstate_format_detected);
+
+ return pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+}
+
+unsigned int tftf_get_psci_version(void)
+{
+ smc_args args = { SMC_PSCI_VERSION };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+
+ return ret.ret0;
+}
+
+int tftf_is_valid_psci_version(unsigned int version)
+{
+ if (version != PSCI_VERSION(1, 1) &&
+ version != PSCI_VERSION(1, 0) &&
+ version != PSCI_VERSION(0, 2) &&
+ version != PSCI_VERSION(0, 1)) {
+ return 0;
+ }
+ return 1;
+}
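
Note: a sketch of how a test might combine the helpers above to build a
standby power state valid in either format, glossing over per-platform
State-ID constraints; the resulting value would be fed to tftf_cpu_suspend():

	static uint32_t make_standby_pstate(void)
	{
		uint32_t state_id = 0;

		/* With NULL State-IDs, the state_id field stays zero. */
		if (!tftf_is_psci_state_id_null())
			tftf_psci_make_composite_state_id(PSTATE_AFF_LVL_0,
							  PSTATE_TYPE_STANDBY,
							  &state_id);

		return tftf_make_psci_pstate(PSTATE_AFF_LVL_0,
					     PSTATE_TYPE_STANDBY, state_id);
	}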
diff --git a/lib/sdei/sdei.c b/lib/sdei/sdei.c
new file mode 100644
index 000000000..846b96eb4
--- /dev/null
+++ b/lib/sdei/sdei.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <assert.h>
+#include <sdei.h>
+#include <smccc.h>
+#include <stdint.h>
+#include <tftf_lib.h>
+
+int64_t sdei_version(void)
+{
+ smc_args args = { SDEI_VERSION };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_interrupt_bind(int intr, struct sdei_intr_ctx *intr_ctx)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ assert(intr_ctx);
+
+ intr_ctx->priority = arm_gic_get_intr_priority(intr);
+ intr_ctx->num = intr;
+ intr_ctx->enabled = arm_gic_intr_enabled(intr);
+ arm_gic_intr_disable(intr);
+
+ args.arg0 = SDEI_INTERRUPT_BIND;
+ args.arg1 = intr;
+ ret = tftf_smc(&args);
+	if ((int64_t)ret.ret0 < 0) {
+ arm_gic_set_intr_priority(intr_ctx->num, intr_ctx->priority);
+ if (intr_ctx->enabled)
+ arm_gic_intr_enable(intr_ctx->num);
+ }
+
+ return ret.ret0;
+}
+
+int64_t sdei_interrupt_release(int ev, const struct sdei_intr_ctx *intr_ctx)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ assert(intr_ctx);
+
+ args.arg0 = SDEI_INTERRUPT_RELEASE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ if (ret.ret0 == 0) {
+ arm_gic_set_intr_priority(intr_ctx->num, intr_ctx->priority);
+ if (intr_ctx->enabled)
+ arm_gic_intr_enable(intr_ctx->num);
+ }
+
+ return ret.ret0;
+}
+
+int64_t sdei_event_register(int ev, sdei_handler_t *ep,
+ uint64_t ep_arg, int flags, uint64_t mpidr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_REGISTER;
+ args.arg1 = ev;
+ args.arg2 = (u_register_t)ep;
+ args.arg3 = ep_arg;
+ args.arg4 = flags;
+ args.arg5 = mpidr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_unregister(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_UNREGISTER;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_enable(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_ENABLE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_disable(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_DISABLE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_pe_mask(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PE_MASK;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_pe_unmask(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PE_UNMASK;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_private_reset(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PRIVATE_RESET;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_shared_reset(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_SHARED_RESET;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_signal(uint64_t mpidr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_SIGNAL;
+ args.arg1 = 0; /* must be event 0 */
+ args.arg2 = mpidr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_status(int32_t ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_STATUS;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_routing_set(int32_t ev, uint64_t flags)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_ROUTING_SET;
+ args.arg1 = ev;
+ args.arg2 = flags;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_context(uint32_t param)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_CONTEXT;
+ args.arg1 = param;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_complete(uint32_t flags)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_COMPLETE;
+ args.arg1 = flags;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_complete_and_resume(uint64_t addr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_COMPLETE_AND_RESUME;
+ args.arg1 = addr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
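
Note: a hedged sketch of the typical bring-up sequence for a bound interrupt
using these wrappers; MY_SPI, my_sdei_handler and the SDEI_REGF_RM_PE routing
flag are illustrative assumptions:

	#include <arch_helpers.h>
	#include <sdei.h>

	#define MY_SPI	100	/* illustrative SPI number */

	extern sdei_handler_t my_sdei_handler;	/* illustrative handler */

	static void sdei_bind_and_enable(void)
	{
		struct sdei_intr_ctx intr_ctx;
		int64_t ev;

		ev = sdei_interrupt_bind(MY_SPI, &intr_ctx);
		if (ev < 0)
			return;		/* bind failed, GIC state restored */

		if (sdei_event_register(ev, &my_sdei_handler, 0 /* ep_arg */,
					SDEI_REGF_RM_PE /* assumed flag */,
					read_mpidr_el1()) == 0) {
			sdei_event_enable(ev);
			sdei_pe_unmask();	/* allow delivery on this PE */
		}
	}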
diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S
new file mode 100644
index 000000000..fe489b6f6
--- /dev/null
+++ b/lib/semihosting/aarch32/semihosting_call.S
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
+func semihosting_call
+ svc #0x123456
+ bx lr
+endfunc semihosting_call
diff --git a/lib/semihosting/aarch64/semihosting_call.S b/lib/semihosting/aarch64/semihosting_call.S
new file mode 100644
index 000000000..dfded2ec6
--- /dev/null
+++ b/lib/semihosting/aarch64/semihosting_call.S
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
+func semihosting_call
+ hlt #0xf000
+ ret
+endfunc semihosting_call
diff --git a/lib/semihosting/semihosting.c b/lib/semihosting/semihosting.c
new file mode 100644
index 000000000..cf1080227
--- /dev/null
+++ b/lib/semihosting/semihosting.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <semihosting.h>
+#include <string.h>
+
+#ifndef SEMIHOSTING_SUPPORTED
+#define SEMIHOSTING_SUPPORTED 1
+#endif
+
+long semihosting_call(unsigned long operation,
+ void *system_block_address);
+
+typedef struct {
+ const char *file_name;
+ unsigned long mode;
+ size_t name_length;
+} smh_file_open_block_t;
+
+typedef struct {
+ long handle;
+ uintptr_t buffer;
+ size_t length;
+} smh_file_read_write_block_t;
+
+typedef struct {
+ long handle;
+ ssize_t location;
+} smh_file_seek_block_t;
+
+typedef struct {
+ char *command_line;
+ size_t command_length;
+} smh_system_block_t;
+
+long semihosting_connection_supported(void)
+{
+ return SEMIHOSTING_SUPPORTED;
+}
+
+long semihosting_file_open(const char *file_name, size_t mode)
+{
+ smh_file_open_block_t open_block;
+
+ open_block.file_name = file_name;
+ open_block.mode = mode;
+ open_block.name_length = strlen(file_name);
+
+ return semihosting_call(SEMIHOSTING_SYS_OPEN,
+ (void *) &open_block);
+}
+
+long semihosting_file_seek(long file_handle, ssize_t offset)
+{
+ smh_file_seek_block_t seek_block;
+ long result;
+
+ seek_block.handle = file_handle;
+ seek_block.location = offset;
+
+ result = semihosting_call(SEMIHOSTING_SYS_SEEK,
+ (void *) &seek_block);
+
+ if (result)
+ result = semihosting_call(SEMIHOSTING_SYS_ERRNO, 0);
+
+ return result;
+}
+
+long semihosting_file_read(long file_handle, size_t *length, uintptr_t buffer)
+{
+ smh_file_read_write_block_t read_block;
+ long result = -EINVAL;
+
+ if ((length == NULL) || (buffer == (uintptr_t)NULL))
+ return result;
+
+ read_block.handle = file_handle;
+ read_block.buffer = buffer;
+ read_block.length = *length;
+
+ result = semihosting_call(SEMIHOSTING_SYS_READ,
+ (void *) &read_block);
+
+ if (result == *length) {
+ return -EINVAL;
+ } else if (result < *length) {
+ *length -= result;
+ return 0;
+ } else
+ return result;
+}
+
+long semihosting_file_write(long file_handle,
+ size_t *length,
+ const uintptr_t buffer)
+{
+ smh_file_read_write_block_t write_block;
+
+ if ((length == NULL) || (buffer == (uintptr_t)NULL))
+ return -EINVAL;
+
+ write_block.handle = file_handle;
+ write_block.buffer = (uintptr_t)buffer; /* cast away const */
+ write_block.length = *length;
+
+ *length = semihosting_call(SEMIHOSTING_SYS_WRITE,
+ (void *) &write_block);
+
+ return *length;
+}
+
+long semihosting_file_close(long file_handle)
+{
+ return semihosting_call(SEMIHOSTING_SYS_CLOSE,
+ (void *) &file_handle);
+}
+
+long semihosting_file_length(long file_handle)
+{
+ return semihosting_call(SEMIHOSTING_SYS_FLEN,
+ (void *) &file_handle);
+}
+
+char semihosting_read_char(void)
+{
+ return semihosting_call(SEMIHOSTING_SYS_READC, NULL);
+}
+
+void semihosting_write_char(char character)
+{
+ semihosting_call(SEMIHOSTING_SYS_WRITEC, (void *) &character);
+}
+
+void semihosting_write_string(char *string)
+{
+ semihosting_call(SEMIHOSTING_SYS_WRITE0, (void *) string);
+}
+
+long semihosting_system(char *command_line)
+{
+ smh_system_block_t system_block;
+
+ system_block.command_line = command_line;
+ system_block.command_length = strlen(command_line);
+
+ return semihosting_call(SEMIHOSTING_SYS_SYSTEM,
+ (void *) &system_block);
+}
+
+long semihosting_get_flen(const char *file_name)
+{
+ long file_handle;
+ size_t length;
+
+ assert(semihosting_connection_supported());
+
+ file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
+ if (file_handle == -1)
+ return file_handle;
+
+ /* Find the length of the file */
+ length = semihosting_file_length(file_handle);
+
+ return semihosting_file_close(file_handle) ? -1 : length;
+}
+
+long semihosting_download_file(const char *file_name,
+ size_t buf_size,
+ uintptr_t buf)
+{
+ long ret = -EINVAL;
+ size_t length;
+ long file_handle;
+
+ /* Null pointer check */
+ if (!buf)
+ return ret;
+
+ assert(semihosting_connection_supported());
+
+ file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
+ if (file_handle == -1)
+ return ret;
+
+ /* Find the actual length of the file */
+ length = semihosting_file_length(file_handle);
+ if (length == -1)
+ goto semihosting_fail;
+
+ /* Signal error if we do not have enough space for the file */
+ if (length > buf_size)
+ goto semihosting_fail;
+
+ /*
+ * A successful read will return 0 in which case we pass back
+ * the actual number of bytes read. Else we pass a negative
+ * value indicating an error.
+ */
+ ret = semihosting_file_read(file_handle, &length, buf);
+ if (ret)
+ goto semihosting_fail;
+ else
+ ret = length;
+
+semihosting_fail:
+ semihosting_file_close(file_handle);
+ return ret;
+}
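+
+/*
+ * Illustrative usage of the helpers above (hypothetical caller; the file
+ * name, buf, buf_size and the INFO() message are examples only):
+ *
+ *	long bytes = semihosting_download_file("fip.bin", buf_size, buf);
+ *	if (bytes > 0)
+ *		INFO("Read %ld bytes from the host\n", bytes);
+ */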
diff --git a/lib/smc/aarch32/asm_smc.S b/lib/smc/aarch32/asm_smc.S
new file mode 100644
index 000000000..908b8d075
--- /dev/null
+++ b/lib/smc/aarch32/asm_smc.S
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016-2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl asm_tftf_smc32
+
+/* ---------------------------------------------------------------------------
+ * void asm_tftf_smc32(const smc_args *args,
+ * smc_ret_values *smc_ret);
+ * ---------------------------------------------------------------------------
+ */
+func asm_tftf_smc32
+	/* Push r9 as well to keep the stack pointer 64-bit aligned. */
+ push {r4 - r9}
+
+ /* Store the `smc_ret` pointer in a callee saved register */
+ mov r8, r1
+
+ /* Load values used as arguments for the SMC. */
+ ldm r0, {r0 - r7}
+
+ smc #0
+
+ /*
+ * The returned values from the SMC are in r0-r3, put them in the
+ * 'smc_ret_values' return structure.
+ */
+ stm r8, {r0 - r3}
+
+ pop {r4 - r9}
+ bx lr
+endfunc asm_tftf_smc32
diff --git a/lib/smc/aarch32/smc.c b/lib/smc/aarch32/smc.c
new file mode 100644
index 000000000..dc3d83fbe
--- /dev/null
+++ b/lib/smc/aarch32/smc.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+void asm_tftf_smc32(const smc_args *args,
+ smc_ret_values *smc_ret);
+
+smc_ret_values tftf_smc(const smc_args *args)
+{
+ smc_ret_values ret = {0};
+ asm_tftf_smc32(args, &ret);
+
+ return ret;
+}
diff --git a/lib/smc/aarch64/asm_smc.S b/lib/smc/aarch64/asm_smc.S
new file mode 100644
index 000000000..2b305b9d3
--- /dev/null
+++ b/lib/smc/aarch64/asm_smc.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013-2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl asm_tftf_smc64
+
+ .section .text, "ax"
+
+
+/* ---------------------------------------------------------------------------
+ * smc_ret_values asm_tftf_smc64(uint64_t arg0,
+ * uint64_t arg1,
+ * uint64_t arg2,
+ * uint64_t arg3,
+ * uint64_t arg4,
+ * uint64_t arg5,
+ * uint64_t arg6,
+ * uint64_t arg7);
+ * ---------------------------------------------------------------------------
+ */
+func asm_tftf_smc64
+ /*
+ * According to the AAPCS64, x8 is the indirect result location
+ * register. It contains the address of the memory block that the caller
+ * has reserved to hold the result, i.e. the smc_ret_values structure
+ * in our case.
+ * x8 might be clobbered across the SMC call so save it on the stack.
+	 * Although x8 contains an 8-byte value, allocate 16 bytes on the
+	 * stack to respect the 16-byte stack alignment.
+ */
+ str x8, [sp, #-16]!
+
+	/* SMC arguments are already stored in x0-x7 */
+ smc #0
+
+ /* Pop x8 into a caller-saved register */
+ ldr x9, [sp], #16
+
+ /*
+ * Return values are stored in x0-x3, put them in the 'smc_ret_values'
+ * return structure
+ */
+ stp x0, x1, [x9, #0]
+ stp x2, x3, [x9, #16]
+ ret
+endfunc asm_tftf_smc64
diff --git a/lib/smc/aarch64/smc.c b/lib/smc/aarch64/smc.c
new file mode 100644
index 000000000..06b841c9f
--- /dev/null
+++ b/lib/smc/aarch64/smc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+smc_ret_values asm_tftf_smc64(u_register_t arg0,
+ u_register_t arg1,
+ u_register_t arg2,
+ u_register_t arg3,
+ u_register_t arg4,
+ u_register_t arg5,
+ u_register_t arg6,
+ u_register_t arg7);
+
+smc_ret_values tftf_smc(const smc_args *args)
+{
+ return asm_tftf_smc64(args->arg0,
+ args->arg1,
+ args->arg2,
+ args->arg3,
+ args->arg4,
+ args->arg5,
+ args->arg6,
+ args->arg7);
+}
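+
+/*
+ * Illustrative usage (hypothetical caller): issue the standard SMC32
+ * PSCI_VERSION call (function ID 0x84000000) and decode the result.
+ *
+ *	smc_args version_args = { 0x84000000 };
+ *	smc_ret_values ret = tftf_smc(&version_args);
+ *	unsigned int major = (ret.ret0 >> 16) & 0xffff;
+ *	unsigned int minor = ret.ret0 & 0xffff;
+ */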
diff --git a/lib/stdlib/abort.c b/lib/stdlib/abort.c
new file mode 100644
index 000000000..50780335d
--- /dev/null
+++ b/lib/stdlib/abort.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+/*
+ * This is a basic implementation. This could be improved.
+ */
+void abort (void)
+{
+ ERROR("ABORT\n");
+ panic();
+}
diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c
new file mode 100644
index 000000000..39f30d12c
--- /dev/null
+++ b/lib/stdlib/assert.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+/*
+ * This is a basic implementation. This could be improved.
+ */
+void __assert (const char *function, const char *file, unsigned int line,
+ const char *assertion)
+{
+ mp_printf("ASSERT: %s <%d> : %s\n", function, line, assertion);
+ while (1);
+}
diff --git a/lib/stdlib/mem.c b/lib/stdlib/mem.c
new file mode 100644
index 000000000..ec9dc2dea
--- /dev/null
+++ b/lib/stdlib/mem.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h> /* size_t */
+
+/*
+ * Fill @count bytes of memory pointed to by @dst with @val
+ */
+void *memset(void *dst, int val, size_t count)
+{
+ char *ptr = dst;
+
+ while (count--)
+ *ptr++ = val;
+
+ return dst;
+}
+
+/*
+ * Compare @len bytes of @s1 and @s2
+ */
+int memcmp(const void *s1, const void *s2, size_t len)
+{
+	const unsigned char *s = s1;
+	const unsigned char *d = s2;
+	unsigned char dc;
+	unsigned char sc;
+
+	while (len--) {
+		sc = *s++;
+		dc = *d++;
+		if (sc != dc)
+			return (sc - dc);
+ }
+
+ return 0;
+}
+
+/*
+ * Copy @len bytes from @src to @dst
+ */
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ const char *s = src;
+ char *d = dst;
+
+ while (len--)
+ *d++ = *s++;
+
+ return dst;
+}
+
+/*
+ * Move @len bytes from @src to @dst
+ */
+void *memmove(void *dst, const void *src, size_t len)
+{
+ /*
+ * The following test makes use of unsigned arithmetic overflow to
+	 * more efficiently test the condition !(src <= dst && dst < src + len).
+	 * It also avoids the situation where the more explicit test would give
+	 * incorrect results were the calculation src + len to overflow (though
+	 * that issue is probably moot as such usage is probably undefined
+	 * behaviour and a bug anyway).
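+	 *
+	 * Illustrative example (hypothetical addresses): with dst = 0x1008,
+	 * src = 0x1000 and len = 0x10, dst - src = 8 < len, so the regions
+	 * overlap and the backward copy below is used; with dst = 0x1000 and
+	 * src = 0x1008, the unsigned subtraction wraps around to a value far
+	 * greater than len, so forward copying via memcpy is safe.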
+ */
+ if ((size_t)dst - (size_t)src >= len) {
+ /* destination not in source data, so can safely use memcpy */
+ return memcpy(dst, src, len);
+ } else {
+ /* copy backwards... */
+ const char *end = dst;
+ const char *s = (const char *)src + len;
+ char *d = (char *)dst + len;
+ while (d != end)
+ *--d = *--s;
+ }
+ return dst;
+}
+
+/*
+ * Scan @len bytes of @src for value @c
+ */
+void *memchr(const void *src, int c, size_t len)
+{
+	const unsigned char *s = src;
+
+	while (len--) {
+		if (*s == (unsigned char)c)
+			return (void *) s;
+ s++;
+ }
+
+ return NULL;
+}
diff --git a/lib/stdlib/printf.c b/lib/stdlib/printf.c
new file mode 100644
index 000000000..6329157d0
--- /dev/null
+++ b/lib/stdlib/printf.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Choose max of 512 chars for now. */
+#define PRINT_BUFFER_SIZE 512
+int printf(const char *fmt, ...)
+{
+ va_list args;
+ char buf[PRINT_BUFFER_SIZE];
+ int count;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf) - 1, fmt, args);
+ va_end(args);
+
+ /* Use putchar directly as 'puts()' adds a newline. */
+ buf[PRINT_BUFFER_SIZE - 1] = '\0';
+ count = 0;
+	while (buf[count]) {
+ if (putchar(buf[count]) != EOF) {
+ count++;
+ } else {
+ count = EOF;
+ break;
+ }
+ }
+
+ return count;
+}
diff --git a/lib/stdlib/putchar.c b/lib/stdlib/putchar.c
new file mode 100644
index 000000000..6b6f75ebb
--- /dev/null
+++ b/lib/stdlib/putchar.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <console.h>
+#include <stdio.h>
+
+/* Putchar() should either return the character printed or EOF in case of error.
+ * Our current console_putc() function assumes success and returns the
+ * character. Write all other printing functions in terms of putchar(), if
+ * possible, so they all benefit when this is improved.
+ */
+int putchar(int c)
+{
+ int res;
+ if (console_putc((unsigned char)c) >= 0)
+ res = c;
+ else
+ res = EOF;
+
+ return res;
+}
diff --git a/lib/stdlib/puts.c b/lib/stdlib/puts.c
new file mode 100644
index 000000000..08283a38c
--- /dev/null
+++ b/lib/stdlib/puts.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+
+int puts(const char *s)
+{
+ int count = 0;
+	while (*s) {
+ if (putchar(*s++) != EOF) {
+ count++;
+ } else {
+ count = EOF;
+ break;
+ }
+ }
+
+ /* According to the puts(3) manpage, the function should write a
+ * trailing newline.
+ */
+ if ((count != EOF) && (putchar('\n') != EOF))
+ count++;
+ else
+ count = EOF;
+
+ return count;
+}
diff --git a/lib/stdlib/rand.c b/lib/stdlib/rand.c
new file mode 100644
index 000000000..59cb79631
--- /dev/null
+++ b/lib/stdlib/rand.c
@@ -0,0 +1,65 @@
+/*-
+ * Portions Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+//__FBSDID("$FreeBSD: src/lib/libc/stdlib/rand.c,v 1.17.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $");
+#include <stdlib.h>
+
+static unsigned int next = 1;
+
+/** Compute a pseudo-random number.
+ *
+ * Compute x = (7^5 * x) mod (2^31 - 1)
+ * without overflowing 31 bits:
+ * (2^31 - 1) = 127773 * (7^5) + 2836
+ * From "Random number generators: good ones are hard to find",
+ * Park and Miller, Communications of the ACM, vol. 31, no. 10,
+ * October 1988, p. 1195.
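+ *
+ * Using Schrage's decomposition: with hi = x / 127773 and lo = x % 127773,
+ * (16807 * x) mod (2^31 - 1) = 16807 * lo - 2836 * hi, plus (2^31 - 1)
+ * whenever that intermediate value is negative, which is exactly what the
+ * code below computes without overflowing a signed 32-bit integer.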
+**/
+int
+rand()
+{
+ int hi, lo, x;
+
+ /* Can't be initialized with 0, so use another value. */
+ if (next == 0)
+ next = 123459876;
+ hi = next / 127773;
+ lo = next % 127773;
+ x = 16807 * lo - 2836 * hi;
+ if (x < 0)
+ x += 0x7fffffff;
+ return ((next = x) % ((unsigned int)RAND_MAX + 1));
+}
+
+void
+srand(unsigned int seed)
+{
+ next = (unsigned int)seed;
+}
diff --git a/lib/stdlib/strchr.c b/lib/stdlib/strchr.c
new file mode 100644
index 000000000..0963cb4c0
--- /dev/null
+++ b/lib/stdlib/strchr.c
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2013-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <stddef.h>
+#include <string.h>
+
+char *
+strchr(const char *p, int ch)
+{
+ char c;
+
+ c = ch;
+ for (;; ++p) {
+ if (*p == c)
+ return ((char *)p);
+ if (*p == '\0')
+ return (NULL);
+ }
+ /* NOTREACHED */
+}
diff --git a/lib/stdlib/strcmp.c b/lib/stdlib/strcmp.c
new file mode 100644
index 000000000..52a415b7e
--- /dev/null
+++ b/lib/stdlib/strcmp.c
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2014-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <string.h>
+
+/*
+ * Compare strings.
+ */
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == '\0')
+ return 0;
+ return *(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1);
+}
diff --git a/lib/stdlib/strlen.c b/lib/stdlib/strlen.c
new file mode 100644
index 000000000..ba2c5ea22
--- /dev/null
+++ b/lib/stdlib/strlen.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2009-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <stddef.h>
+
+size_t
+strlen(const char *str)
+{
+	const char *s;
+
+	for (s = str; *s != '\0'; ++s)
+		;
+	return (s - str);
+}
diff --git a/lib/stdlib/strncmp.c b/lib/stdlib/strncmp.c
new file mode 100644
index 000000000..c5ad0a862
--- /dev/null
+++ b/lib/stdlib/strncmp.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2014-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <string.h>
+
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+
+ if (n == 0)
+ return 0;
+ do {
+ if (*s1 != *s2++)
+ return (*(const unsigned char *)s1 -
+ *(const unsigned char *)(s2 - 1));
+ if (*s1++ == '\0')
+ break;
+ } while (--n != 0);
+ return 0;
+}
diff --git a/lib/stdlib/strncpy.c b/lib/stdlib/strncpy.c
new file mode 100644
index 000000000..00e4b7a41
--- /dev/null
+++ b/lib/stdlib/strncpy.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2015-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+/*
+ * Copy src to dst, truncating or null-padding to always copy n bytes.
+ * Return dst.
+ */
+char *
+strncpy(char * __restrict dst, const char * __restrict src, size_t n)
+{
+ if (n != 0) {
+ char *d = dst;
+ const char *s = src;
+
+ do {
+ if ((*d++ = *s++) == '\0') {
+ /* NUL pad the remaining n-1 bytes */
+ while (--n != 0)
+ *d++ = '\0';
+ break;
+ }
+ } while (--n != 0);
+ }
+ return (dst);
+}
diff --git a/lib/stdlib/subr_prf.c b/lib/stdlib/subr_prf.c
new file mode 100644
index 000000000..2e272d900
--- /dev/null
+++ b/lib/stdlib/subr_prf.c
@@ -0,0 +1,548 @@
+/*-
+ * Copyright (c) 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
+ */
+
+/*
+ * Portions copyright (c) 2009-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <ctype.h>
+
+typedef unsigned char u_char;
+typedef unsigned int u_int;
+typedef int64_t quad_t;
+typedef uint64_t u_quad_t;
+typedef unsigned long u_long;
+typedef unsigned short u_short;
+
+static inline int imax(int a, int b) { return (a > b ? a : b); }
+
+/*
+ * Note that stdarg.h and the ANSI style va_start macro is used for both
+ * ANSI and traditional C compilers.
+ */
+
+#define TOCONS 0x01
+#define TOTTY 0x02
+#define TOLOG 0x04
+
+/* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */
+#define MAXNBUF (sizeof(intmax_t) * 8 + 1)
+
+struct putchar_arg {
+ int flags;
+ int pri;
+ struct tty *tty;
+ char *p_bufr;
+ size_t n_bufr;
+ char *p_next;
+ size_t remain;
+};
+
+struct snprintf_arg {
+ char *str;
+ size_t remain;
+};
+
+extern int log_open;
+
+static char *ksprintn(char *nbuf, uintmax_t num, int base, int *len, int upper);
+static void snprintf_func(int ch, void *arg);
+static int kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap);
+
+int vsnprintf(char *str, size_t size, const char *format, va_list ap);
+
+static char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+#define hex2ascii(hex) (hex2ascii_data[hex])
+
+/*
+ * Scaled down version of sprintf(3).
+ */
+int
+sprintf(char *buf, const char *cfmt, ...)
+{
+ int retval;
+ va_list ap;
+
+ va_start(ap, cfmt);
+ retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
+ buf[retval] = '\0';
+ va_end(ap);
+ return (retval);
+}
+
+/*
+ * Scaled down version of vsprintf(3).
+ */
+int
+vsprintf(char *buf, const char *cfmt, va_list ap)
+{
+ int retval;
+
+ retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
+ buf[retval] = '\0';
+ return (retval);
+}
+
+/*
+ * Scaled down version of snprintf(3).
+ */
+int
+snprintf(char *str, size_t size, const char *format, ...)
+{
+ int retval;
+ va_list ap;
+
+ va_start(ap, format);
+ retval = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return(retval);
+}
+
+/*
+ * Scaled down version of vsnprintf(3).
+ */
+int
+vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ struct snprintf_arg info;
+ int retval;
+
+ info.str = str;
+ info.remain = size;
+ retval = kvprintf(format, snprintf_func, &info, 10, ap);
+ if (info.remain >= 1)
+ *info.str++ = '\0';
+ return (retval);
+}
+
+static void
+snprintf_func(int ch, void *arg)
+{
+ struct snprintf_arg *const info = arg;
+
+ if (info->remain >= 2) {
+ *info->str++ = ch;
+ info->remain--;
+ }
+}
+
+
+/*
+ * Kernel version which takes radix argument vsnprintf(3).
+ */
+int
+vsnrprintf(char *str, size_t size, int radix, const char *format, va_list ap)
+{
+ struct snprintf_arg info;
+ int retval;
+
+ info.str = str;
+ info.remain = size;
+ retval = kvprintf(format, snprintf_func, &info, radix, ap);
+ if (info.remain >= 1)
+ *info.str++ = '\0';
+ return (retval);
+}
+
+
+/*
+ * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
+ * order; return an optional length and a pointer to the last character
+ * written in the buffer (i.e., the first character of the string).
+ * The buffer pointed to by `nbuf' must have length >= MAXNBUF.
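+ * For example (illustrative), ksprintn(nbuf, 255, 16, &len, 0) stores
+ * "\0ff" in nbuf, sets len to 2 and returns a pointer to the final 'f';
+ * callers then emit the digits by walking that pointer backwards.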
+ */
+static char *
+ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper)
+{
+ char *p, c;
+
+ p = nbuf;
+ *p = '\0';
+ do {
+ c = hex2ascii(num % base);
+ *++p = upper ? toupper(c) : c;
+ } while (num /= base);
+ if (lenp)
+ *lenp = p - nbuf;
+ return (p);
+}
+
+/*
+ * Scaled down version of printf(3).
+ *
+ * Two additional formats:
+ *
+ * The format %b is supported to decode error registers.
+ * Its usage is:
+ *
+ * printf("reg=%b\n", regval, "<base><arg>*");
+ *
+ * where <base> is the output base expressed as a control character, e.g.
+ * \10 gives octal; \20 gives hex. Each arg is a sequence of characters,
+ * the first of which gives the bit number to be inspected (origin 1), and
+ * the next characters (up to a control character, i.e. a character <= 32),
+ * give the name of the register. Thus:
+ *
+ * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n");
+ *
+ * would produce output:
+ *
+ * reg=3<BITTWO,BITONE>
+ *
+ * XXX: %D -- Hexdump, takes pointer and separator string:
+ * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX
+ *	("%*D", len, ptr, " ") -> XX XX XX XX ...
+ */
+int
+kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap)
+{
+#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; }
+ char nbuf[MAXNBUF];
+ char *d;
+ const char *p, *percent, *q;
+ u_char *up;
+ int ch, n;
+ uintmax_t num;
+ int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
+ int cflag, hflag, jflag, tflag, zflag;
+ int dwidth, upper;
+ char padc;
+ int stop = 0, retval = 0;
+
+ num = 0;
+ if (!func)
+ d = (char *) arg;
+ else
+ d = NULL;
+
+ if (fmt == NULL)
+ fmt = "(fmt null)\n";
+
+ if (radix < 2 || radix > 36)
+ radix = 10;
+
+ for (;;) {
+ padc = ' ';
+ width = 0;
+ while ((ch = (u_char)*fmt++) != '%' || stop) {
+ if (ch == '\0')
+ return (retval);
+ PCHAR(ch);
+ }
+ percent = fmt - 1;
+ qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
+ sign = 0; dot = 0; dwidth = 0; upper = 0;
+ cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0;
+reswitch: switch (ch = (u_char)*fmt++) {
+ case '.':
+ dot = 1;
+ goto reswitch;
+ case '#':
+ sharpflag = 1;
+ goto reswitch;
+ case '+':
+ sign = 1;
+ goto reswitch;
+ case '-':
+ ladjust = 1;
+ goto reswitch;
+ case '%':
+ PCHAR(ch);
+ break;
+ case '*':
+ if (!dot) {
+ width = va_arg(ap, int);
+ if (width < 0) {
+ ladjust = !ladjust;
+ width = -width;
+ }
+ } else {
+ dwidth = va_arg(ap, int);
+ }
+ goto reswitch;
+ case '0':
+ if (!dot) {
+ padc = '0';
+ goto reswitch;
+ }
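+			/* FALLTHROUGH */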
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ for (n = 0;; ++fmt) {
+ n = n * 10 + ch - '0';
+ ch = *fmt;
+ if (ch < '0' || ch > '9')
+ break;
+ }
+ if (dot)
+ dwidth = n;
+ else
+ width = n;
+ goto reswitch;
+ case 'b':
+ num = (u_int)va_arg(ap, int);
+ p = va_arg(ap, char *);
+ for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;)
+ PCHAR(*q--);
+
+ if (num == 0)
+ break;
+
+ for (tmp = 0; *p;) {
+ n = *p++;
+ if (num & (1 << (n - 1))) {
+ PCHAR(tmp ? ',' : '<');
+ for (; (n = *p) > ' '; ++p)
+ PCHAR(n);
+ tmp = 1;
+ } else
+ for (; *p > ' '; ++p)
+ continue;
+ }
+ if (tmp)
+ PCHAR('>');
+ break;
+ case 'c':
+ PCHAR(va_arg(ap, int));
+ break;
+ case 'D':
+ up = va_arg(ap, u_char *);
+ p = va_arg(ap, char *);
+ if (!width)
+ width = 16;
+ while(width--) {
+ PCHAR(hex2ascii(*up >> 4));
+ PCHAR(hex2ascii(*up & 0x0f));
+ up++;
+ if (width)
+ for (q=p;*q;q++)
+ PCHAR(*q);
+ }
+ break;
+ case 'd':
+ case 'i':
+ base = 10;
+ sign = 1;
+ goto handle_sign;
+ case 'h':
+ if (hflag) {
+ hflag = 0;
+ cflag = 1;
+ } else
+ hflag = 1;
+ goto reswitch;
+ case 'j':
+ jflag = 1;
+ goto reswitch;
+ case 'l':
+ if (lflag) {
+ lflag = 0;
+ qflag = 1;
+ } else
+ lflag = 1;
+ goto reswitch;
+ case 'n':
+ if (jflag)
+ *(va_arg(ap, intmax_t *)) = retval;
+ else if (qflag)
+ *(va_arg(ap, quad_t *)) = retval;
+ else if (lflag)
+ *(va_arg(ap, long *)) = retval;
+ else if (zflag)
+ *(va_arg(ap, size_t *)) = retval;
+ else if (hflag)
+ *(va_arg(ap, short *)) = retval;
+ else if (cflag)
+ *(va_arg(ap, char *)) = retval;
+ else
+ *(va_arg(ap, int *)) = retval;
+ break;
+ case 'o':
+ base = 8;
+ goto handle_nosign;
+ case 'p':
+ base = 16;
+ sharpflag = (width == 0);
+ sign = 0;
+ num = (uintptr_t)va_arg(ap, void *);
+ goto number;
+ case 'q':
+ qflag = 1;
+ goto reswitch;
+ case 'r':
+ base = radix;
+ if (sign)
+ goto handle_sign;
+ goto handle_nosign;
+ case 's':
+ p = va_arg(ap, char *);
+ if (p == NULL)
+ p = "(null)";
+ if (!dot)
+ n = strlen (p);
+ else
+ for (n = 0; n < dwidth && p[n]; n++)
+ continue;
+
+ width -= n;
+
+ if (!ladjust && width > 0)
+ while (width--)
+ PCHAR(padc);
+ while (n--)
+ PCHAR(*p++);
+ if (ladjust && width > 0)
+ while (width--)
+ PCHAR(padc);
+ break;
+ case 't':
+ tflag = 1;
+ goto reswitch;
+ case 'u':
+ base = 10;
+ goto handle_nosign;
+ case 'X':
+ upper = 1;
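+			/* FALLTHROUGH */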
+ case 'x':
+ base = 16;
+ goto handle_nosign;
+ case 'y':
+ base = 16;
+ sign = 1;
+ goto handle_sign;
+ case 'z':
+ zflag = 1;
+ goto reswitch;
+handle_nosign:
+ sign = 0;
+ if (jflag)
+ num = va_arg(ap, uintmax_t);
+ else if (qflag)
+ num = va_arg(ap, u_quad_t);
+ else if (tflag)
+ num = va_arg(ap, ptrdiff_t);
+ else if (lflag)
+ num = va_arg(ap, u_long);
+ else if (zflag)
+ num = va_arg(ap, size_t);
+ else if (hflag)
+ num = (u_short)va_arg(ap, int);
+ else if (cflag)
+ num = (u_char)va_arg(ap, int);
+ else
+ num = va_arg(ap, u_int);
+ goto number;
+handle_sign:
+ if (jflag)
+ num = va_arg(ap, intmax_t);
+ else if (qflag)
+ num = va_arg(ap, quad_t);
+ else if (tflag)
+ num = va_arg(ap, ptrdiff_t);
+ else if (lflag)
+ num = va_arg(ap, long);
+ else if (zflag)
+ num = va_arg(ap, ssize_t);
+ else if (hflag)
+ num = (short)va_arg(ap, int);
+ else if (cflag)
+ num = (char)va_arg(ap, int);
+ else
+ num = va_arg(ap, int);
+number:
+ if (sign && (intmax_t)num < 0) {
+ neg = 1;
+ num = -(intmax_t)num;
+ }
+ p = ksprintn(nbuf, num, base, &n, upper);
+ tmp = 0;
+ if (sharpflag && num != 0) {
+ if (base == 8)
+ tmp++;
+ else if (base == 16)
+ tmp += 2;
+ }
+ if (neg)
+ tmp++;
+
+ if (!ladjust && padc == '0')
+ dwidth = width - tmp;
+ width -= tmp + imax(dwidth, n);
+ dwidth -= n;
+ if (!ladjust)
+ while (width-- > 0)
+ PCHAR(' ');
+ if (neg)
+ PCHAR('-');
+ if (sharpflag && num != 0) {
+ if (base == 8) {
+ PCHAR('0');
+ } else if (base == 16) {
+ PCHAR('0');
+ PCHAR('x');
+ }
+ }
+ while (dwidth-- > 0)
+ PCHAR('0');
+
+ while (*p)
+ PCHAR(*p--);
+
+ if (ladjust)
+ while (width-- > 0)
+ PCHAR(' ');
+
+ break;
+ default:
+ while (percent < fmt)
+ PCHAR(*percent++);
+ /*
+			 * Since we ignore a formatting argument it is no
+ * longer safe to obey the remaining formatting
+ * arguments as the arguments will no longer match
+ * the format specs.
+ */
+ stop = 1;
+ break;
+ }
+ }
+#undef PCHAR
+}
diff --git a/lib/trusted_os/trusted_os.c b/lib/trusted_os/trusted_os.c
new file mode 100644
index 000000000..b24c3d395
--- /dev/null
+++ b/lib/trusted_os/trusted_os.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <stdint.h>
+#include <tftf.h>
+#include <trusted_os.h>
+#include <uuid_utils.h>
+
+unsigned int is_trusted_os_present(uuid_t *tos_uuid)
+{
+ smc_args tos_uid_args = { SMC_TOS_UID };
+ smc_ret_values ret;
+ uint32_t *tos_uuid32;
+
+ ret = tftf_smc(&tos_uid_args);
+
+ if ((ret.ret0 == SMC_UNKNOWN) ||
+ ((ret.ret0 == 0) && (ret.ret1 == 0) && (ret.ret2 == 0) &&
+ (ret.ret3 == 0)))
+ return 0;
+
+ tos_uuid32 = (uint32_t *) tos_uuid;
+ tos_uuid32[0] = ret.ret0;
+ tos_uuid32[1] = ret.ret1;
+ tos_uuid32[2] = ret.ret2;
+ tos_uuid32[3] = ret.ret3;
+
+ return 1;
+}
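+
+/*
+ * Illustrative usage (hypothetical caller, not part of this library):
+ *
+ *	uuid_t tos_uuid;
+ *	char tos_uuid_str[UUID_STR_SIZE];
+ *
+ *	if (is_trusted_os_present(&tos_uuid) != 0)
+ *		printf("TOS UUID: %s\n",
+ *			uuid_to_str(&tos_uuid, tos_uuid_str));
+ */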
diff --git a/lib/utils/mp_printf.c b/lib/utils/mp_printf.c
new file mode 100644
index 000000000..d1eb780ea
--- /dev/null
+++ b/lib/utils/mp_printf.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include <spinlock.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Lock to avoid concurrent accesses to the serial console */
+static spinlock_t printf_lock;
+
+/*
+ * Print the MPID header, e.g.: [cpu 0x0100]
+ *
+ * If SHELL_COLOR == 1, this also prints shell's color escape sequences to ease
+ * identifying which CPU displays the message. There are 8 standard colors so
+ * if the platform has more than 8 CPUs, some colors will be reused.
+ */
+#if SHELL_COLOR
+#define PRINT_MPID_HDR(_mpid) \
+ do { \
+ unsigned int linear_id = platform_get_core_pos(_mpid); \
+ printf("\033[1;%u;40m", 30 + (linear_id & 0x7)); \
+ printf("[cpu 0x%.4x] ", _mpid); \
+ printf("\033[0m"); \
+ } while (0)
+#else
+#define PRINT_MPID_HDR(_mpid) \
+ printf("[cpu 0x%.4x] ", _mpid)
+#endif /* SHELL_COLOR */
+
+void mp_printf(const char *fmt, ...)
+{
+ va_list ap;
+ char str[256];
+ /*
+ * As part of testing Firmware Update feature on Cortex-A57 CPU, an
+ * issue was discovered while printing in NS_BL1U stage. The issue
+ * appears when the second call to `NOTICE()` is made in the
+ * `ns_bl1u_main()`. As a result of this issue the CPU hangs and the
+ * debugger is also not able to connect anymore.
+ *
+ * After further debugging and experiments it was found that if
+ * `read_mpidr_el1()` is avoided or volatile qualifier is used for
+ * reading the mpidr, this issue gets resolved.
+ *
+ * NOTE: The actual/real reason why this happens is still not known.
+ * Moreover this problem is not encountered on Cortex-A53 CPU.
+ */
+ volatile unsigned int mpid = read_mpidr_el1() & 0xFFFF;
+
+ /*
+ * TODO: It would be simpler to use vprintf() instead of
+	 * vsnprintf() + printf(); we would not need a local buffer for
+	 * storing the output of vsnprintf(). Unfortunately our C library
+ * doesn't provide vprintf() at the moment.
+ * Import vprintf() code from FreeBSD C library to our local C library.
+ */
+ va_start(ap, fmt);
+ vsnprintf(str, sizeof(str), fmt, ap);
+ str[sizeof(str) - 1] = 0;
+ va_end(ap);
+
+ spin_lock(&printf_lock);
+ PRINT_MPID_HDR(mpid);
+ printf("%s", str);
+ spin_unlock(&printf_lock);
+}
diff --git a/lib/utils/uuid.c b/lib/utils/uuid.c
new file mode 100644
index 000000000..21747a208
--- /dev/null
+++ b/lib/utils/uuid.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <uuid_utils.h>
+
+/* Format string to print a UUID */
+static const char *uuid_str_fmt = "{ 0x%.8x, 0x%.4x, 0x%.4x, 0x%.2x, 0x%.2x, "
+ "0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x }";
+
+
+unsigned int is_uuid_null(const uuid_t *uuid)
+{
+ const uuid_t uuid_null = {0};
+
+ return memcmp(uuid, &uuid_null, sizeof(uuid_t)) == 0;
+}
+
+char *uuid_to_str(const uuid_t *uuid, char *str)
+{
+ assert(uuid != NULL);
+ assert(str != NULL);
+
+ snprintf(str, UUID_STR_SIZE, uuid_str_fmt,
+ uuid->time_low, uuid->time_mid, uuid->time_hi_and_version,
+ uuid->clock_seq_hi_and_reserved, uuid->clock_seq_low,
+ uuid->node[0], uuid->node[1], uuid->node[2], uuid->node[3],
+ uuid->node[4], uuid->node[5]);
+
+ return str;
+}
+
+unsigned int uuid_equal(const uuid_t *uuid1, const uuid_t *uuid2)
+{
+ return memcmp(uuid1, uuid2, sizeof(uuid_t)) == 0;
+}
+
+uuid_t *make_uuid_from_4words(uuid_t *uuid,
+ uint32_t w0,
+ uint32_t w1,
+ uint32_t w2,
+ uint32_t w3)
+{
+ uint32_t *uuid32;
+
+ assert(uuid != NULL);
+
+ uuid32 = (uint32_t *) uuid;
+ uuid32[0] = w0;
+ uuid32[1] = w1;
+ uuid32[2] = w2;
+ uuid32[3] = w3;
+
+ return uuid;
+}
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
new file mode 100644
index 000000000..4a4ac30f5
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_svc_mon
+ .global enable_mmu_direct_hyp
+
+ /* void enable_mmu_direct_svc_mon(unsigned int flags) */
+func enable_mmu_direct_svc_mon
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, SCTLR
+ tst r1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* MAIR0. Only the lower 32 bits are used. */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, MAIR0
+
+ /* TTBCR. Only the lower 32 bits are used. */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, TTBCR
+
+ /* TTBR0 */
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, TTBR0_64
+
+ /* TTBR1 is unused right now; set it to 0. */
+ mov r1, #0
+ mov r2, #0
+ stcopr16 r1, r2, TTBR1_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+	/* Enable the MMU, honoring the flags */
+ ldcopr r1, SCTLR
+ ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #SCTLR_C_BIT
+
+ stcopr r1, SCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_svc_mon
+
+
+ /* void enable_mmu_direct_hyp(unsigned int flags) */
+func enable_mmu_direct_hyp
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, HSCTLR
+ tst r1, #HSCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* HMAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, HMAIR0
+
+ /* HTCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, HTCR
+
+ /* HTTBR */
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, HTTBR_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+	/* Enable the MMU, honoring the flags */
+ ldcopr r1, HSCTLR
+ ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #HSCTLR_C_BIT
+
+ stcopr r1, HSCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_hyp
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
new file mode 100644
index 000000000..66938e5f1
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+ /*
+ * The library uses the long descriptor translation table format, which
+ * supports 4 KiB pages only.
+ */
+ return size == PAGE_SIZE_4KB;
+}
+
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+ return PAGE_SIZE_4KB;
+}
+
+#if ENABLE_ASSERTIONS
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+ /* Physical address space size for long descriptor format. */
+ return (1ULL << 40) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS*/
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() == 1U);
+ return (read_sctlr() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL2_REGIME);
+ assert(xlat_arch_current_el() == 2U);
+ return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ if (IS_IN_EL2()) {
+ return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ return UPPER_ATTRS(XN);
+ }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ tlbimvaais(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ tlbimvahis(TLBI_ADDR(va));
+ }
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+ /* Invalidate all entries from branch predictors. */
+ bpiallis();
+
+ /*
+ * A TLB maintenance instruction can complete at any time after
+ * it is issued, but is only guaranteed to be complete after the
+ * execution of DSB by the PE that executed the TLB maintenance
+ * instruction. After the TLB invalidate instruction is
+ * complete, no new memory accesses using the invalidated TLB
+ * entries will be observed by any observer of the system
+ * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+ * "Ordering and completion of TLB maintenance instructions".
+ */
+ dsbish();
+
+ /*
+ * The effects of a completed TLB maintenance instruction are
+ * only guaranteed to be visible on the PE that executed the
+ * instruction after the execution of an ISB instruction by the
+ * PE that executed the TLB maintenance instruction.
+ */
+ isb();
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+ if (IS_IN_HYP()) {
+ return 2U;
+ } else {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
+ * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+ *
+ * The PL1&0 translation regime in AArch32 behaves like the
+ * EL1&0 regime in AArch64 except for the XN bits, but we set
+ * and unset them at the same time, so there's no difference in
+ * practice.
+ */
+ return 1U;
+ }
+}
+
+/*******************************************************************************
+ * Function for setting up the MMU configuration (MAIR, TTBCR, TTBR) used to
+ * enable the MMU in PL1 or PL2, assuming the translation tables have already
+ * been created.
+ ******************************************************************************/
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, __unused int xlat_regime)
+{
+ uint64_t mair, ttbr0;
+ uint32_t ttbcr;
+
+ /* Set attributes in the right indices of the MAIR */
+ mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+ ATTR_IWBWA_OWBWA_NTR_INDEX);
+ mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+ ATTR_NON_CACHEABLE_INDEX);
+
+ /*
+ * Configure the control register for stage 1 of the PL1&0 or EL2
+ * translation regimes.
+ */
+
+ /* Use the Long-descriptor translation table format. */
+ ttbcr = TTBCR_EAE_BIT;
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * Disable translation table walk for addresses that are
+ * translated using TTBR1. Therefore, only TTBR0 is used.
+ */
+ ttbcr |= TTBCR_EPD1_BIT;
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ assert(IS_IN_HYP());
+
+ /*
+ * Set HTCR bits as well. Set HTTBR table properties
+ * as Inner & outer WBWA & shareable.
+ */
+ ttbcr |= HTCR_RES1 |
+ HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
+ HTCR_RGN0_INNER_WBA;
+ }
+
+ /*
+ * Limit the input address ranges and memory region sizes translated
+ * using TTBR0 to the given virtual address space size, if smaller than
+ * 32 bits.
+ */
+ if (max_va != UINT32_MAX) {
+ uintptr_t virtual_addr_space_size = max_va + 1U;
+
+ assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+ /*
+ * __builtin_ctzll(0) is undefined but here we are guaranteed
+ * that virtual_addr_space_size is in the range [1, UINT32_MAX].
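+		 * For example, a 2 GiB address space (max_va = 0x7FFFFFFF)
+		 * gives virtual_addr_space_size = 1 << 31 and hence
+		 * t0sz = 32 - 31 = 1.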
+ */
+ int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+ ttbcr |= (uint32_t) t0sz;
+ }
+
+ /*
+ * Set the cacheability and shareability attributes for memory
+ * associated with translation table walks using TTBR0.
+ */
+ if ((flags & XLAT_TABLE_NC) != 0U) {
+ /* Inner & outer non-cacheable non-shareable. */
+ ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
+ TTBCR_RGN0_INNER_NC;
+ } else {
+ /* Inner & outer WBWA & shareable. */
+ ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+ TTBCR_RGN0_INNER_WBA;
+ }
+
+ /* Set TTBR0 bits as well */
+ ttbr0 = (uint64_t)(uintptr_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+ /* Now populate MMU configuration */
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
+}
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 000000000..21717d28a
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_el1
+ .global enable_mmu_direct_el2
+ .global enable_mmu_direct_el3
+
+ /* Macros to read and write to system register for a given EL. */
+ .macro _msr reg_name, el, gp_reg
+ msr \reg_name\()_el\()\el, \gp_reg
+ .endm
+
+ .macro _mrs gp_reg, reg_name, el
+ mrs \gp_reg, \reg_name\()_el\()\el
+ .endm
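+
+	/* For example, "_msr sctlr, 3, x4" expands to "msr sctlr_el3, x4". */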
+
+ .macro tlbi_invalidate_all el
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .elseif \el == 2
+ TLB_INVALIDATE(alle2)
+ .elseif \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1, 2 or 3"
+ .endif
+ .endm
+
+ /* void enable_mmu_direct_el<x>(unsigned int flags) */
+ .macro define_mmu_enable_func el
+ func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+ _mrs x1, sctlr, \el
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* Invalidate all TLB entries */
+ tlbi_invalidate_all \el
+
+ mov x7, x0
+ ldr x0, =mmu_cfg_params
+
+ /* MAIR */
+ ldr x1, [x0, #(MMU_CFG_MAIR << 3)]
+ _msr mair, \el, x1
+
+ /* TCR */
+ ldr x2, [x0, #(MMU_CFG_TCR << 3)]
+ _msr tcr, \el, x2
+
+ /* TTBR */
+ ldr x3, [x0, #(MMU_CFG_TTBR0 << 3)]
+ _msr ttbr0, \el, x3
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ _mrs x4, sctlr, \el
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ _msr sctlr, \el, x4
+ isb
+
+ ret
+ endfunc enable_mmu_direct_\()el\el
+ .endm
+
+ /*
+	 * Define MMU-enabling functions for EL1, EL2 and EL3:
+	 *
+	 * enable_mmu_direct_el1
+	 * enable_mmu_direct_el2
+	 * enable_mmu_direct_el3
+ */
+ define_mmu_enable_func 1
+ define_mmu_enable_func 2
+ define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
new file mode 100644
index 000000000..d1555bf27
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+ u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
+
+ if (size == PAGE_SIZE_4KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
+ } else if (size == PAGE_SIZE_16KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
+ } else if (size == PAGE_SIZE_64KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ } else {
+		return false;
+ }
+}
+
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
+ return PAGE_SIZE_16KB;
+ } else {
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
+ return PAGE_SIZE_4KB;
+ }
+}
+
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
+
+ /* 48 bits address */
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
+
+#if ENABLE_ASSERTIONS
+/*
+ * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
+ * supported in ARMv8.2 onwards.
+ */
+static const unsigned int pa_range_bits_arr[] = {
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+ PARANGE_0101, PARANGE_0110
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+ u_register_t pa_range = read_id_aa64mmfr0_el1() &
+ ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+ /* All other values are reserved */
+ assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+ return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS*/
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else if (el == 2U) {
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert((xlat_regime == EL2_REGIME) ||
+ (xlat_regime == EL3_REGIME));
+ return UPPER_ATTRS(XN);
+ }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ /*
+ * This function only supports invalidation of TLB entries for the EL3
+ * and EL1&0 translation regimes.
+ *
+ * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+ * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+ */
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ tlbivaae1is(TLBI_ADDR(va));
+ } else if (xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ tlbivae2is(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ tlbivae3is(TLBI_ADDR(va));
+ }
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+ /*
+ * A TLB maintenance instruction can complete at any time after
+ * it is issued, but is only guaranteed to be complete after the
+ * execution of DSB by the PE that executed the TLB maintenance
+ * instruction. After the TLB invalidate instruction is
+ * complete, no new memory accesses using the invalidated TLB
+ * entries will be observed by any observer of the system
+ * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+ * "Ordering and completion of TLB maintenance instructions".
+ */
+ dsbish();
+
+ /*
+ * The effects of a completed TLB maintenance instruction are
+ * only guaranteed to be visible on the PE that executed the
+ * instruction after the execution of an ISB instruction by the
+ * PE that executed the TLB maintenance instruction.
+ */
+ isb();
+}
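+
+/*
+ * Typical usage (illustrative sketch, not a definitive pattern): callers may
+ * batch several invalidations and issue a single synchronisation at the end:
+ *
+ *   xlat_arch_tlbi_va(va1, EL3_REGIME);
+ *   xlat_arch_tlbi_va(va2, EL3_REGIME);
+ *   xlat_arch_tlbi_va_sync();
+ */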
+
+unsigned int xlat_arch_current_el(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ assert(el > 0U);
+
+ return el;
+}
+
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime)
+{
+ uint64_t mair, ttbr0, tcr;
+ uintptr_t virtual_addr_space_size;
+
+ /* Set attributes in the right indices of the MAIR. */
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+ /*
+ * Limit the input address ranges and memory region sizes translated
+ * using TTBR0 to the given virtual address space size.
+ */
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
+
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
+ assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
+ /*
+ * __builtin_ctzll(0) is undefined but here we are guaranteed that
+ * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
+ */
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+ tcr = (uint64_t) t0sz;
+
+ /*
+ * Set the cacheability and shareability attributes for memory
+ * associated with translation table walks.
+ */
+ if ((flags & XLAT_TABLE_NC) != 0U) {
+ /* Inner & outer non-cacheable non-shareable. */
+ tcr |= TCR_SH_NON_SHAREABLE |
+ TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+ } else {
+ /* Inner & outer WBWA & shareable. */
+ tcr |= TCR_SH_INNER_SHAREABLE |
+ TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+ }
+
+ /*
+ * It is safer to restrict the max physical address accessible by the
+ * hardware as much as possible.
+ */
+ unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ /*
+ * TCR_EL1.EPD1: Disable translation table walk for addresses
+ * that are translated using TTBR1_EL1.
+ */
+ tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+ } else if (xlat_regime == EL2_REGIME) {
+ tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+ }
+
+ /* Set TTBR bits as well */
+ ttbr0 = (uint64_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = tcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
+}
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
new file mode 100644
index 000000000..9507ad715
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
+ ${ARCH}/enable_mmu.S \
+ ${ARCH}/xlat_tables_arch.c \
+ xlat_tables_context.c \
+ xlat_tables_core.c \
+ xlat_tables_utils.c)
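+
+# Usage sketch (illustrative): a consumer makefile would typically do
+#
+#   include lib/xlat_tables_v2/xlat_tables.mk
+#   SOURCES += ${XLAT_TABLES_LIB_SRCS}
+#
+# where SOURCES stands for whichever source list variable the build uses, and
+# ARCH must already be set to aarch32 or aarch64.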
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
new file mode 100644
index 000000000..df491d097
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Each platform can define the size of its physical and virtual address spaces.
+ * If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the software
+ * image currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+ mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+}
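+
+/*
+ * Usage sketch (illustrative; the PA and size are hypothetical): map a
+ * two-page device region at a VA chosen by the library and retrieve that VA:
+ *
+ *   uintptr_t va;
+ *
+ *   mmap_add_region_alloc_va(0x80000000ULL, &va, 2U * PAGE_SIZE,
+ *                            MT_DEVICE | MT_RW);
+ */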
+
+void mmap_add_alloc_va(mmap_region_t *mm)
+{
+ while (mm->size != 0U) {
+ assert(mm->base_va == 0U);
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
+ mm++;
+ }
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+
+ return rc;
+}
+
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+ return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+ base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables(void)
+{
+ assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+ unsigned int current_el = xlat_arch_current_el();
+
+ if (current_el == 1U) {
+ tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+ } else if (current_el == 2U) {
+ tf_xlat_ctx.xlat_regime = EL2_REGIME;
+ } else {
+ assert(current_el == 3U);
+ tf_xlat_ctx.xlat_regime = EL3_REGIME;
+ }
+
+ init_xlat_tables_ctx(&tf_xlat_ctx);
+}
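+
+/*
+ * Typical bring-up sequence (illustrative sketch for the EL1 case; base_pa,
+ * base_va and size are placeholders): add the static regions, build the
+ * tables, then enable the MMU:
+ *
+ *   mmap_add_region(base_pa, base_va, size, MT_MEMORY | MT_RW);
+ *   init_xlat_tables();
+ *   enable_mmu_el1(0U);
+ */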
+
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+ return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_svc_mon(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_el1(flags);
+}
+
+void enable_mmu_el2(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_el2(flags);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL3_REGIME);
+ enable_mmu_direct_el3(flags);
+}
+
+#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
new file mode 100644
index 000000000..80ce4fa7a
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -0,0 +1,1157 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <string.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+ if (is_dcache_enabled())
+ clean_dcache_range(addr, size);
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * The following functions assume that they are only called on subtables. The
+ * base table can't be unmapped, so no special handling is needed for it.
+ */
+
+/*
+ * Returns the index of the array corresponding to the specified translation
+ * table.
+ */
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables[i] == table)
+ return i;
+
+ /*
+ * Maybe we were asked to get the index of the base level table, which
+ * should never happen.
+ */
+ assert(false);
+
+ return -1;
+}
+
+/* Returns a pointer to an empty translation table. */
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables_mapped_regions[i] == 0)
+ return ctx->tables[i];
+
+ return NULL;
+}
+
+/* Increments region count for a given table. */
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
+}
+
+/* Decrements region count for a given table. */
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
+}
+
+/* Returns true if the specified table is empty, false otherwise. */
+static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
+}
+
+#else /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Returns a pointer to the first empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+ assert(ctx->next_table < ctx->tables_num);
+
+ return ctx->tables[ctx->next_table++];
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level)
+{
+ uint64_t desc;
+ uint32_t mem_type;
+
+ /* Make sure that the granularity is fine enough to map this address. */
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
+
+ desc = addr_pa;
+ /*
+ * There are different translation table descriptors for level 3 and the
+ * rest.
+ */
+ desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+ /*
+ * Always set the access flag, as this library assumes access flag
+ * faults aren't managed.
+ */
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+ /*
+ * Deduce other fields of the descriptor based on the MT_NS and MT_RW
+ * memory region attributes.
+ */
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+
+ /*
+	 * Do not allow unprivileged access when the mapping is for a privileged
+	 * EL. In the EL1&0 regime, clear AP[1] (AP_NO_ACCESS_UNPRIVILEGED)
+	 * unless an EL0 mapping was requested; in the EL2 and EL3 regimes that
+	 * bit is RES1.
+ */
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ if ((attr & MT_USER) != 0U) {
+ /* EL0 mapping requested, so we give User access */
+ desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+ } else {
+ /* EL1 mapping requested, no User access granted */
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
+ } else {
+ assert((ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+ desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
+ }
+
+ /*
+ * Deduce shareability domain and executability of the memory region
+ * from the memory type of the attributes (MT_TYPE).
+ *
+ * Data accesses to device memory and non-cacheable normal memory are
+ * coherent for all observers in the system, and correspondingly are
+ * always treated as being Outer Shareable. Therefore, for these 2 types
+ * of memory, it is not strictly needed to set the shareability field
+ * in the translation tables.
+ */
+ mem_type = MT_TYPE(attr);
+ if (mem_type == MT_DEVICE) {
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ /*
+ * Always map device memory as execute-never.
+ * This is to avoid the possibility of a speculative instruction
+ * fetch, which could be an issue if this memory region
+ * corresponds to a read-sensitive peripheral.
+ */
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ } else { /* Normal memory */
+ /*
+ * Always map read-write normal memory as execute-never.
+ * This library assumes that it is used by software that does
+ * not self-modify its code, therefore R/W memory is reserved
+ * for data storage, which must not be executable.
+ *
+ * Note that setting the XN bit here is for consistency only.
+ * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
+ * which makes any writable memory region to be treated as
+ * execute-never, regardless of the value of the XN bit in the
+ * translation table.
+ *
+ * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+ * attribute to figure out the value of the XN bit. The actual
+ * XN bit(s) to set in the descriptor depends on the context's
+ * translation regime and the policy applied in
+ * xlat_arch_regime_get_xn_desc().
+ */
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+ }
+
+ if (mem_type == MT_MEMORY) {
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ } else {
+ assert(mem_type == MT_NON_CACHEABLE);
+ desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+ }
+ }
+
+ return desc;
+}
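+
+/*
+ * For illustration (not exhaustive): a normal-memory, read-only, executable,
+ * secure page at level 3 in the EL3 regime yields a descriptor of the form
+ *
+ *   addr_pa | PAGE_DESC | LOWER_ATTRS(ACCESS_FLAG | AP_RO |
+ *       AP_ONE_VA_RANGE_RES1 | ATTR_IWBWA_OWBWA_NTR_INDEX | ISH)
+ *
+ * with no XN bits set: read-only normal memory without MT_EXECUTE_NEVER is
+ * the only combination this function maps as executable.
+ */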
+
+/*
+ * Enumeration of actions that can be made when mapping table entries depending
+ * on the previous value in that entry and information about the region being
+ * mapped.
+ */
+typedef enum {
+
+ /* Do nothing */
+ ACTION_NONE,
+
+ /* Write a block (or page, if in level 3) entry. */
+ ACTION_WRITE_BLOCK_ENTRY,
+
+ /*
+ * Create a new table and write a table entry pointing to it. Recurse
+ * into it for further processing.
+ */
+ ACTION_CREATE_NEW_TABLE,
+
+ /*
+ * There is a table descriptor in this entry, read it and recurse into
+ * that table for further processing.
+ */
+ ACTION_RECURSE_INTO_TABLE,
+
+} action_t;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * Recursive function that writes to the translation tables and unmaps the
+ * specified region.
+ */
+static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ const uintptr_t table_base_va,
+ uint64_t *const table_base,
+ const unsigned int table_entries,
+ const unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ uintptr_t table_idx_va;
+ uintptr_t table_idx_end_va; /* End VA of this entry */
+
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+
+ unsigned int table_idx;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
+
+ assert(table_idx < table_entries);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ table_idx = 0;
+ }
+
+ while (table_idx < table_entries) {
+
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ desc = table_base[table_idx];
+ uint64_t desc_type = desc & DESC_MASK;
+
+ action_t action;
+
+ if ((mm->base_va <= table_idx_va) &&
+ (region_end_va >= table_idx_end_va)) {
+ /* Region covers all block */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors allowed,
+ * erase it.
+ */
+ assert(desc_type == PAGE_DESC);
+
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ } else {
+ /*
+ * Other levels can have table descriptors. If
+ * so, recurse into it and erase descriptors
+ * inside it as needed. If there is a block
+ * descriptor, just erase it. If an invalid
+ * descriptor is found, this table isn't
+ * actually mapped, which shouldn't happen.
+ */
+ if (desc_type == TABLE_DESC) {
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ assert(desc_type == BLOCK_DESC);
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ }
+ }
+
+ } else if ((mm->base_va <= table_idx_end_va) ||
+ (region_end_va >= table_idx_va)) {
+ /*
+ * Region partially covers block.
+ *
+			 * This can't happen at level 3.
+ *
+ * There must be a table descriptor here, if not there
+ * was a problem when mapping the region.
+ */
+ assert(level < 3U);
+ assert(desc_type == TABLE_DESC);
+
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
+ }
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+
+ /* Recurse to write into subtable */
+ xlat_tables_unmap_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /*
+ * If the subtable is now empty, remove its reference.
+ */
+ if (xlat_table_is_empty(ctx, subtable)) {
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
+ }
+
+ } else {
+ assert(action == ACTION_NONE);
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (region_end_va <= table_idx_va)
+ break;
+ }
+
+ if (level > ctx->base_level)
+ xlat_table_dec_regions_count(ctx, table_base);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * From the given arguments, it decides which action to take when mapping the
+ * specified region.
+ */
+static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
+{
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+ uintptr_t table_entry_end_va =
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ /*
+ * The descriptor types allowed depend on the current table level.
+ */
+
+ if ((mm->base_va <= table_entry_base_va) &&
+ (mm_end_va >= table_entry_end_va)) {
+
+ /*
+ * Table entry is covered by region
+ * --------------------------------
+ *
+ * This means that this table entry can describe the whole
+ * translation with this granularity in principle.
+ */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors are allowed.
+ */
+ if (desc_type == PAGE_DESC) {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ return ACTION_NONE;
+ } else {
+ assert(desc_type == INVALID_DESC);
+ return ACTION_WRITE_BLOCK_ENTRY;
+ }
+
+ } else {
+
+ /*
+ * Other levels. Table descriptors are allowed. Block
+ * descriptors too, but they have some limitations.
+ */
+
+ if (desc_type == TABLE_DESC) {
+ /* There's already a table, recurse into it. */
+ return ACTION_RECURSE_INTO_TABLE;
+
+ } else if (desc_type == INVALID_DESC) {
+ /*
+ * There's nothing mapped here, create a new
+ * entry.
+ *
+ * Check if the destination granularity allows
+ * us to use a block descriptor or we need a
+ * finer table for it.
+ *
+ * Also, check if the current level allows block
+ * descriptors. If not, create a table instead.
+ */
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
+ (mm->granularity < XLAT_BLOCK_SIZE(level)))
+ return ACTION_CREATE_NEW_TABLE;
+ else
+ return ACTION_WRITE_BLOCK_ENTRY;
+
+ } else {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ assert(desc_type == BLOCK_DESC);
+
+ return ACTION_NONE;
+ }
+ }
+
+ } else if ((mm->base_va <= table_entry_end_va) ||
+ (mm_end_va >= table_entry_base_va)) {
+
+ /*
+ * Region partially covers table entry
+ * -----------------------------------
+ *
+ * This means that this table entry can't describe the whole
+ * translation, a finer table is needed.
+		 *
+ * There cannot be partial block overlaps in level 3. If that
+ * happens, some of the preliminary checks when adding the
+ * mmap region failed to detect that PA and VA must at least be
+ * aligned to PAGE_SIZE.
+ */
+ assert(level < 3U);
+
+ if (desc_type == INVALID_DESC) {
+ /*
+ * The block is not fully covered by the region. Create
+ * a new table, recurse into it and try to map the
+ * region with finer granularity.
+ */
+ return ACTION_CREATE_NEW_TABLE;
+
+ } else {
+ assert(desc_type == TABLE_DESC);
+ /*
+ * The block is not fully covered by the region, but
+ * there is already a table here. Recurse into it and
+ * try to map with finer granularity.
+ *
+ * PAGE_DESC for level 3 has the same value as
+ * TABLE_DESC, but this code can't run on a level 3
+ * table because there can't be overlaps in level 3.
+ */
+ return ACTION_RECURSE_INTO_TABLE;
+ }
+ } else {
+
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
+}
+
+/*
+ * Recursive function that writes to the translation tables and maps the
+ * specified region. On success, it returns the VA of the last byte that was
+ * successfully mapped. On error, it returns the VA of the next entry that
+ * should have been mapped.
+ */
+static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ uintptr_t table_base_va,
+ uint64_t *const table_base,
+ unsigned int table_entries,
+ unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+
+ uintptr_t table_idx_va;
+ unsigned long long table_idx_pa;
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ unsigned int table_idx;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
+
+ assert(table_idx < table_entries);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ table_idx = 0U;
+ }
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (level > ctx->base_level)
+ xlat_table_inc_regions_count(ctx, table_base);
+#endif
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
+
+ action_t action = xlat_tables_map_region_action(mm,
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] =
+ xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+ level);
+
+ } else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
+
+ subtable = xlat_table_get_empty(ctx);
+ if (subtable == NULL) {
+ /* Not enough free tables to map this region */
+ return table_idx_va;
+ }
+
+ /* Point to new subtable from this one. */
+ table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else {
+
+ assert(action == ACTION_NONE);
+
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (mm_end_va <= table_idx_va)
+ break;
+ }
+
+ return table_idx_va - 1U;
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns:
+ * 0: Success, the mapping is allowed.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: The memory limits were surpassed.
+ * ENOMEM: There is not enough memory in the mmap array.
+ * EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ unsigned long long base_pa = mm->base_pa;
+ uintptr_t base_va = mm->base_va;
+ size_t size = mm->size;
+ size_t granularity = mm->granularity;
+
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
+
+ if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+ !IS_PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
+ return -EINVAL;
+ }
+
+ /* Check for overflows */
+ if ((base_pa > end_pa) || (base_va > end_va))
+ return -ERANGE;
+
+ if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
+ return -ERANGE;
+
+ if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
+ return -ERANGE;
+
+ /* Check that there is space in the ctx->mmap array */
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
+ return -ENOMEM;
+
+ /* Check for PAs and VAs overlaps with all other regions */
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
+
+ uintptr_t mm_cursor_end_va = mm_cursor->base_va
+ + mm_cursor->size - 1U;
+
+ /*
+ * Check if one of the regions is completely inside the other
+ * one.
+ */
+ bool fully_overlapped_va =
+ ((base_va >= mm_cursor->base_va) &&
+ (end_va <= mm_cursor_end_va)) ||
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va));
+
+ /*
+ * Full VA overlaps are only allowed if both regions are
+ * identity mapped (zero offset) or have the same VA to PA
+ * offset. Also, make sure that it's not the exact same area.
+ * This can only be done with static regions.
+ */
+ if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
+ return -EPERM;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+ if ((mm_cursor->base_va - mm_cursor->base_pa) !=
+ (base_va - base_pa))
+ return -EPERM;
+
+ if ((base_va == mm_cursor->base_va) &&
+ (size == mm_cursor->size))
+ return -EPERM;
+
+ } else {
+ /*
+ * If the regions do not have fully overlapping VAs,
+ * then they must have fully separated VAs and PAs.
+ * Partial overlaps are not allowed
+ */
+
+ unsigned long long mm_cursor_end_pa =
+ mm_cursor->base_pa + mm_cursor->size - 1U;
+
+ bool separated_pa = (end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa);
+ bool separated_va = (end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va);
+
+ if (!separated_va || !separated_pa)
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
+ const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Ignore empty regions */
+ if (mm->size == 0U)
+ return;
+
+ /* Static regions must be added before initializing the xlat tables. */
+ assert(!ctx->initialized);
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0) {
+ ERROR("mmap_add_region_check() failed. error %d\n", ret);
+ assert(false);
+ return;
+ }
+
+ /*
+ * Find correct place in mmap to insert new region.
+ *
+ * 1 - Lower region VA end first.
+ * 2 - Smaller region size first.
+ *
+ * VA 0 0xFF
+ *
+ * 1st |------|
+ * 2nd |------------|
+ * 3rd |------|
+ * 4th |---|
+ * 5th |---|
+ * 6th |----------|
+ * 7th |-------------------------------------|
+ *
+	 * This is required for overlapping regions only. It simplifies adding
+	 * regions with the loop in init_xlat_tables_ctx() because the outer
+ * ones won't overwrite block or page descriptors of regions added
+ * previously.
+ *
+ * Overlapping is only allowed for static regions.
+ */
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /*
+ * Find the last entry marker in the mmap
+ */
+ mm_last = ctx->mmap;
+ while ((mm_last->size != 0U) && (mm_last < mm_end)) {
+ ++mm_last;
+ }
+
+ /*
+ * Check if we have enough space in the memory mapping table.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ /* Make room for new region by moving other regions up by one place */
+ mm_destination = mm_cursor + 1;
+ (void)memmove(mm_destination, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check we haven't lost the empty sentinel from the end of the array.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_end->size == 0U);
+
+ *mm_cursor = *mm;
+
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+}
+
+/*
+ * Determine the table level closest to the initial lookup level that
+ * can describe this translation. Then, align base VA to the next block
+ * at the determined level.
+ */
+static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ /*
+	 * By OR'ing the base PA and the size, the resulting value's alignment
+	 * is the smaller of the two alignments.
+ *
+ * There are three different cases. For example (for 4 KiB page size):
+ *
+ * +--------------+------------------++--------------+
+ * | PA alignment | Size multiple of || VA alignment |
+ * +--------------+------------------++--------------+
+ * | 2 MiB | 2 MiB || 2 MiB | (1)
+ * | 2 MiB | 4 KiB || 4 KiB | (2)
+ * | 4 KiB | 2 MiB || 4 KiB | (3)
+ * +--------------+------------------++--------------+
+ *
+ * - In (1), it is possible to take advantage of the alignment of the PA
+ * and the size of the region to use a level 2 translation table
+ * instead of a level 3 one.
+ *
+ * - In (2), the size is smaller than a block entry of level 2, so it is
+ * needed to use a level 3 table to describe the region or the library
+ * will map more memory than the desired one.
+ *
+ * - In (3), even though the region has the size of one level 2 block
+ * entry, it isn't possible to describe the translation with a level 2
+ * block entry because of the alignment of the base PA.
+ *
+ * Only bits 47:21 of a level 2 block descriptor are used by the MMU,
+ * bits 20:0 of the resulting address are 0 in this case. Because of
+ * this, the PA generated as result of this translation is aligned to
+ * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
+ * though, which means that the resulting translation is incorrect.
+ * The only way to prevent this is by using a finer granularity.
+ */
+ unsigned long long align_check;
+
+ align_check = mm->base_pa | (unsigned long long)mm->size;
+
+ /*
+ * Assume it is always aligned to level 3. There's no need to check that
+ * level because its block size is PAGE_SIZE. The checks to verify that
+ * the addresses and size are aligned to PAGE_SIZE are inside
+ * mmap_add_region.
+ */
+ for (int level = ctx->base_level; level <= 2; ++level) {
+
+ if (align_check & XLAT_BLOCK_MASK(level))
+ continue;
+
+ mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
+ return;
+ }
+}
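+
+/*
+ * Example outcome (illustrative, 4 KiB granule, hypothetical values): with a
+ * candidate base_va of 0x1000, a region with base_pa = 0x40200000 and
+ * size = 0x200000 passes the level 2 alignment check, so base_va is rounded
+ * up to the next 2 MiB boundary (0x200000) and the region can be described
+ * with level 2 block descriptors.
+ */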
+
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ assert(mm->size > 0U);
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ assert(mm->base_va > ctx->max_va);
+
+ mmap_add_region_ctx(ctx, mm);
+}
+
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
+ }
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Nothing to do */
+ if (mm->size == 0U)
+ return 0;
+
+ /* Now this region is a dynamic one */
+ mm->attr |= MT_DYNAMIC;
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Find the adequate entry in the mmap array in the same way done for
+ * static regions in mmap_add_region_ctx().
+ */
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /* Make room for new region by moving other regions up by one place */
+ (void)memmove(mm_cursor + 1U, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ *mm_cursor = *mm;
+
+ /*
+ * Update the translation tables if the xlat tables are initialized. If
+ * not, this region will be mapped when they are initialized.
+ */
+ if (ctx->initialized) {
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ /* Failed to map, remove mmap entry, unmap and return error. */
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check if the mapping function actually managed to map
+ * anything. If not, just return now.
+ */
+ if (mm->base_va >= end_va)
+ return -ENOMEM;
+
+ /*
+ * Something went wrong after mapping some table
+ * entries, undo every change done up to this point.
+ */
+ mmap_region_t unmap_mm = {
+ .base_pa = 0U,
+ .base_va = mm->base_va,
+ .size = end_va - mm->base_va,
+ .attr = 0U
+ };
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ return -ENOMEM;
+ }
+
+ /*
+ * Make sure that all entries are written to the memory. There
+ * is no need to invalidate entries when mapping dynamic regions
+ * because new table/block/page descriptors only replace old
+ * invalid descriptors, that aren't TLB cached.
+ */
+ dsbishst();
+ }
+
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+
+ return 0;
+}
+
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ if (mm->size == 0U)
+ return 0;
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ if (mm->base_va < ctx->max_va) {
+ return -ENOMEM;
+ }
+
+ return mmap_add_dynamic_region_ctx(ctx, mm);
+}
+
+/*
+ * Removes the region with given base Virtual Address and size from the given
+ * context.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: Invalid values were used as arguments (region not found).
+ * EPERM: Tried to remove a static region.
+ */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size)
+{
+ mmap_region_t *mm = ctx->mmap;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
+ int update_max_va_needed = 0;
+ int update_max_pa_needed = 0;
+
+ /* Check sanity of mmap array. */
+ assert(mm[ctx->mmap_num].size == 0U);
+
+ while (mm->size != 0U) {
+ if ((mm->base_va == base_va) && (mm->size == size))
+ break;
+ ++mm;
+ }
+
+ /* Check that the region was found */
+ if (mm->size == 0U)
+ return -EINVAL;
+
+ /* If the region is static it can't be removed */
+ if ((mm->attr & MT_DYNAMIC) == 0U)
+ return -EPERM;
+
+ /* Check if this region is using the top VAs or PAs. */
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
+ update_max_va_needed = 1;
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
+ update_max_pa_needed = 1;
+
+ /* Update the translation tables if needed */
+ if (ctx->initialized) {
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
+ ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ xlat_arch_tlbi_va_sync();
+ }
+
+ /* Remove this region by moving the rest down by one place. */
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
+
+ /* Check if we need to update the max VAs and PAs */
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ return 0;
+}
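+
+/*
+ * Usage sketch for the dynamic API (illustrative; 'pa' is a placeholder):
+ *
+ *   mmap_region_t mm = MAP_REGION_ALLOC_VA(pa, PAGE_SIZE, MT_MEMORY | MT_RW);
+ *
+ *   if (mmap_add_dynamic_region_alloc_va_ctx(ctx, &mm) == 0) {
+ *           ... access the window at mm.base_va ...
+ *           mmap_remove_dynamic_region_ctx(ctx, mm.base_va, mm.size);
+ *   }
+ */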
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables_ctx(xlat_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(!ctx->initialized);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
+ assert(!is_mmu_enabled_ctx(ctx));
+
+ mmap_region_t *mm = ctx->mmap;
+
+ xlat_mmap_print(mm);
+
+ /* All tables must be zeroed before mapping any region. */
+
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
+ ctx->base_table[i] = INVALID_DESC;
+
+ for (int j = 0; j < ctx->tables_num; j++) {
+#if PLAT_XLAT_TABLES_DYNAMIC
+ ctx->tables_mapped_regions[j] = 0;
+#endif
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
+ ctx->tables[j][i] = INVALID_DESC;
+ }
+
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ if (end_va != (mm->base_va + mm->size - 1U)) {
+ ERROR("Not enough memory to map region:\n"
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
+ panic();
+ }
+
+ mm++;
+ }
+
+ assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
+ assert(ctx->max_va <= ctx->va_max_address);
+ assert(ctx->max_pa <= ctx->pa_max_address);
+
+ ctx->initialized = true;
+
+ xlat_tables_print(ctx);
+}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
new file mode 100644
index 000000000..528996a29
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
+
+#include <platform_def.h>
+#include <stdbool.h>
+#include <xlat_tables_defs.h>
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Private shifts and masks to access fields of an mmap attribute
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT U(31)
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the public header.
+ */
+
+/*
+ * Regions mapped before the MMU is enabled can't be unmapped dynamically
+ * (they are static); regions mapped once the MMU is enabled can be. This
+ * behaviour can't be overridden.
+ *
+ * Static regions can overlap each other, dynamic regions can't.
+ */
+#define MT_STATIC (U(0) << MT_DYN_SHIFT)
+#define MT_DYNAMIC (U(1) << MT_DYN_SHIFT)
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
+/*
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. It must be called for every
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
+ * pertaining to a higher exception level, e.g. invalidating EL3 entries from
+ * S-EL1.
+ */
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
+
+/*
+ * This function has to be called at the end of any code that uses the function
+ * xlat_arch_tlbi_va().
+ */
+void xlat_arch_tlbi_va_sync(void);
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void xlat_mmap_print(const mmap_region_t *mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+unsigned int xlat_arch_current_el(void);
+
+/*
+ * Return the maximum physical address supported by the hardware.
+ * This value depends on the execution state (AArch32/AArch64).
+ */
+unsigned long long xlat_arch_get_max_supported_pa(void);
+
+/*
+ * Returns true if the MMU of the translation regime managed by the given
+ * xlat_ctx_t is enabled, false otherwise.
+ */
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
new file mode 100644
index 000000000..1ada48896
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
+{
+ /* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+ /* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_mmap_print(const mmap_region_t *mmap)
+{
+ printf("mmap:\n");
+ const mmap_region_t *mm = mmap;
+
+ while (mm->size != 0U) {
+ printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr,
+ mm->granularity);
+ ++mm;
+	}
+ printf("\n");
+}
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
+ int xlat_regime = ctx->xlat_regime;
+
+ if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ printf("MEM");
+ } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+ printf("NC");
+ } else {
+ assert(mem_type_index == ATTR_DEVICE_INDEX);
+ printf("DEV");
+ }
+
+ if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+ /* For EL3 and EL2 only check the AP[2] and XN bits. */
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
+ } else {
+ assert(xlat_regime == EL1_EL0_REGIME);
+ /*
+ * For EL0 and EL1:
+ * - In AArch64 PXN and UXN can be set independently but in
+ * AArch32 there is no UXN (XN affects both privilege levels).
+ * For consistency, we set them simultaneously in both cases.
+ * - RO and RW permissions must be the same in EL1 and EL0. If
+ * EL0 can access that memory region, so can EL1, with the
+ * same permissions.
+ */
+#if ENABLE_ASSERTIONS
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
+ uint64_t xn_perm = desc & xn_mask;
+
+ assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
+#endif
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ /* Only check one of PXN and UXN, the other one is the same. */
+ printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
+ /*
+ * Privileged regions can only be accessed from EL1, user
+ * regions can be accessed from EL1 and EL0.
+ */
+ printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
+ ? "-USER" : "-PRIV");
+ }
+
+ printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
+}
+
+static const char * const level_spacers[] = {
+ "[LV0] ",
+ " [LV1] ",
+ " [LV2] ",
+ " [LV3] "
+};
+
+static const char *invalid_descriptors_omitted =
+	"%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
+{
+ assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+ uint64_t desc;
+ uintptr_t table_idx_va = table_base_va;
+ unsigned int table_idx = 0U;
+ size_t level_size = XLAT_BLOCK_SIZE(level);
+
+ /*
+ * Keep track of how many invalid descriptors are counted in a row.
+ * Whenever multiple invalid descriptors are found, only the first one
+ * is printed, and a line is added to inform about how many descriptors
+ * have been omitted.
+ */
+ int invalid_row_count = 0;
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+ }
+ invalid_row_count++;
+
+ } else {
+
+ if (invalid_row_count > 1) {
+				printf(invalid_descriptors_omitted,
+ level_spacers[level],
+ invalid_row_count - 1);
+ }
+ invalid_row_count = 0;
+
+ /*
+ * Check if this is a table or a block. Tables are only
+			 * allowed in levels other than 3, but PAGE_DESC has the
+			 * same value as TABLE_DESC, so we need to check.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
+ /*
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
+ */
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1U);
+ } else {
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+ }
+ }
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+		printf(invalid_descriptors_omitted,
+ level_spacers[level], invalid_row_count - 1);
+ }
+}
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+ const char *xlat_regime_str;
+ int used_page_tables;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ xlat_regime_str = "2";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
+ VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
+ VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
+ VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
+
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
+ ctx->base_table_entries);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ used_page_tables = 0;
+ for (int i = 0; i < ctx->tables_num; ++i) {
+ if (ctx->tables_mapped_regions[i] != 0)
+ ++used_page_tables;
+ }
+#else
+ used_page_tables = ctx->next_table;
+#endif
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
+ used_page_tables, ctx->tables_num,
+ ctx->tables_num - used_page_tables);
+
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ unsigned int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ unsigned int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ unsigned int entries;
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ uint64_t idx, desc, desc_type;
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ if (idx >= entries) {
+ WARN("Missing xlat table entry at address 0x%lx\n",
+ virtual_addr);
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * Only page descriptors allowed at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(false);
+
+ return NULL;
+}
+
+
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+ uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, unsigned int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ unsigned int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0U;
+
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
+
+ if (ns_bit == 1U)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0U);
+ }
+
+ return 0;
+}
+
+
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr)
+{
+ return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+ NULL, NULL, NULL);
+}
+
+
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0U) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0U) {
+ WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
+ WARN("%s: Mapping memory as read-write and executable not allowed.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ size_t pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+				WARN("Setting device memory as executable at address 0x%lx.\n",
+ base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ for (unsigned int i = 0U; i < pages_count; ++i) {
+
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
+
+ (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ base_va += PAGE_SIZE;
+ }
+
+	/* Ensure that the last descriptor written is seen by the system. */
+ dsbish();
+
+ return 0;
+}
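+
+/*
+ * Usage sketch (illustrative; buf_va and buf_size are placeholders): make an
+ * already-mapped buffer read-only and non-executable in the default context:
+ *
+ *   int ret = xlat_change_mem_attributes(buf_va, buf_size,
+ *                                        MT_RO | MT_EXECUTE_NEVER);
+ *
+ * buf_va must be page-aligned and buf_size a multiple of PAGE_SIZE, otherwise
+ * the call returns -EINVAL.
+ */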