author     Ferass El Hafidi <vitali64pmemail@protonmail.com>  2023-05-08 19:03:10 +0200
committer  Ferass El Hafidi <vitali64pmemail@protonmail.com>  2023-05-08 19:03:10 +0200
commit     f9ed707f171c8069e99e24e24c3da73d8b6f5716 (patch)
tree       4da9838d387c8bc260e83f3f51f5dfa83e0b48ae /lib/aarch64
Push old Amlogic BL2 sources (HEAD, master)
Diffstat (limited to 'lib/aarch64')
-rw-r--r--  lib/aarch64/cache_helpers.S    245
-rw-r--r--  lib/aarch64/cpu_helpers.S       74
-rw-r--r--  lib/aarch64/misc_helpers.S     324
-rw-r--r--  lib/aarch64/sysreg_helpers.S   782
-rw-r--r--  lib/aarch64/tlb_helpers.S       73
-rw-r--r--  lib/aarch64/xlat_helpers.c     127
-rw-r--r--  lib/aarch64/xlat_tables.c      298
7 files changed, 1923 insertions, 0 deletions
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
new file mode 100644
index 0000000..04caac4
--- /dev/null
+++ b/lib/aarch64/cache_helpers.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl dcisw
+ .globl dccisw
+ .globl dccsw
+ .globl dccvac
+ .globl dcivac
+ .globl dccivac
+ .globl dccvau
+ .globl dczva
+ .globl flush_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+
+ .global _clean_dcache_addr
+ .global _clean_invd_dcache_addr
+ .globl platform_stack_set_bl2
+
+ .type platform_stack_set_bl2, @function
+platform_stack_set_bl2:
+ mov x9, x30 // save the link register
+ mov sp, x0
+ ret x9
+
+func dcisw
+ dc isw, x0
+ ret
+
+
+func dccisw
+ dc cisw, x0
+ ret
+
+
+func dccsw
+ dc csw, x0
+ ret
+
+
+func dccvac
+ dc cvac, x0
+ ret
+
+
+func dcivac
+ dc ivac, x0
+ ret
+
+
+func dccivac
+ dc civac, x0
+ ret
+
+
+func dccvau
+ dc cvau, x0
+ ret
+
+
+func dczva
+ dc zva, x0
+ ret
+
+
+ /* ------------------------------------------
+ * Clean and invalidate the data cache over
+ * [addr, addr + size). 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+flush_loop:
+ dc civac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo flush_loop
+ dsb sy
+ ret
+
+
+ /* ------------------------------------------
+ * Invalidate the data cache over
+ * [addr, addr + size). 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+inv_loop:
+ dc ivac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo inv_loop
+ dsb sy
+ ret
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ mov x10, xzr
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+
+#if 0
+//==============================================
+// void _clean_dcache_addr(unsigned long addr)
+// clean dcache by VA
+//==============================================
+ .type _clean_dcache_addr, @function
+_clean_dcache_addr:
+ dc cvac, x0
+ dsb sy
+ ret
+//==============================================
+// void _clean_invd_dcache_addr(unsigned long addr)
+// clean&invalid dcache by VA
+//==============================================
+ .type _clean_invd_dcache_addr, @function
+_clean_invd_dcache_addr:
+ dc civac, x0
+ dsb sy
+ ret
+#endif
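
For context, a minimal C caller sketch for the range-based helpers above (flush_dcache_range, inv_dcache_range); the extern prototypes and the DMA-buffer scenario are assumptions for illustration, not declarations taken from this tree:

#include <stdint.h>

/* Assumed prototypes for the assembly helpers in cache_helpers.S */
extern void flush_dcache_range(unsigned long addr, unsigned long size);
extern void inv_dcache_range(unsigned long addr, unsigned long size);

/* Hypothetical buffer shared with a non-coherent device */
static uint8_t dma_buf[512];

void publish_to_device(void)
{
	/* Clean+invalidate so the device observes the CPU's writes */
	flush_dcache_range((unsigned long)dma_buf, sizeof(dma_buf));
}

void consume_from_device(void)
{
	/* Invalidate so the CPU re-reads what the device wrote */
	inv_dcache_range((unsigned long)dma_buf, sizeof(dma_buf));
}
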
diff --git a/lib/aarch64/cpu_helpers.S b/lib/aarch64/cpu_helpers.S
new file mode 100644
index 0000000..008d39d
--- /dev/null
+++ b/lib/aarch64/cpu_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .weak cpu_reset_handler
+
+
+func cpu_reset_handler
+ /* ---------------------------------------------
+ * As a bare minimum, enable the SMP bit.
+ * ---------------------------------------------
+ */
+ mrs x0, midr_el1
+ lsr x1, x0, #MIDR_PN_SHIFT
+ and x1, x1, #MIDR_PN_MASK
+ cmp x1, #MIDR_PN_A57
+ b.eq a57_setup_begin
+ cmp x1, #MIDR_PN_A53
+ b.eq smp_setup_begin
+ b smp_setup_end
+
+a57_setup_begin:
+ ubfx x1, x0, #MIDR_VAR_SHIFT, #4
+ cmp x1, #0 // Major Revision 0
+ b.ne smp_setup_begin
+ ubfx x1, x0, #MIDR_REV_SHIFT, #4
+ cmp x1, #0 // Minor Revision 0
+ b.ne smp_setup_begin
+ mov x1, #CPUACTLR_NO_ALLOC_WBWA
+ orr x1, x1, #CPUACTLR_DIS_DMB_NULL
+ orr x1, x1, #CPUACTLR_DCC_AS_DCCI
+ mrs x0, CPUACTLR_EL1
+ orr x0, x0, x1
+ msr CPUACTLR_EL1, x0
+ mov x0, #0x082
+ msr s3_1_c11_c0_2, x0
+
+smp_setup_begin:
+ mrs x0, CPUECTLR_EL1
+ orr x0, x0, #CPUECTLR_SMP_BIT
+ msr CPUECTLR_EL1, x0
+ isb
+
+smp_setup_end:
+ ret
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
new file mode 100644
index 0000000..e15c243
--- /dev/null
+++ b/lib/aarch64/misc_helpers.S
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl enable_irq
+ .globl disable_irq
+
+ .globl enable_fiq
+ .globl disable_fiq
+
+ .globl enable_serror
+ .globl disable_serror
+
+ .globl enable_debug_exceptions
+ .globl disable_debug_exceptions
+
+ .globl read_daif
+ .globl write_daif
+
+ .globl read_spsr_el1
+ .globl read_spsr_el2
+ .globl read_spsr_el3
+
+ .globl write_spsr_el1
+ .globl write_spsr_el2
+ .globl write_spsr_el3
+
+ .globl read_elr_el1
+ .globl read_elr_el2
+ .globl read_elr_el3
+
+ .globl write_elr_el1
+ .globl write_elr_el2
+ .globl write_elr_el3
+
+ .globl get_afflvl_shift
+ .globl mpidr_mask_lower_afflvls
+ .globl dsb
+ .globl isb
+ .globl sev
+ .globl wfe
+ .globl wfi
+ .globl eret
+ .globl smc
+
+ .globl zeromem16
+ .globl memcpy16
+
+ .globl disable_mmu_el3
+ .globl disable_mmu_icache_el3
+ .globl remap_zero_address
+
+
+func get_afflvl_shift
+ cmp x0, #3
+ cinc x0, x0, eq
+ mov x1, #MPIDR_AFFLVL_SHIFT
+ lsl x0, x0, x1
+ ret
+
+func mpidr_mask_lower_afflvls
+ cmp x1, #3
+ cinc x1, x1, eq
+ mov x2, #MPIDR_AFFLVL_SHIFT
+ lsl x2, x1, x2
+ lsr x0, x0, x2
+ lsl x0, x0, x2
+ ret
+
+ /* -----------------------------------------------------
+ * Asynchronous exception manipulation accessors
+ * -----------------------------------------------------
+ */
+func enable_irq
+ msr daifclr, #DAIF_IRQ_BIT
+ ret
+
+
+func enable_fiq
+ msr daifclr, #DAIF_FIQ_BIT
+ ret
+
+
+func enable_serror
+ msr daifclr, #DAIF_ABT_BIT
+ ret
+
+
+func enable_debug_exceptions
+ msr daifclr, #DAIF_DBG_BIT
+ ret
+
+
+func disable_irq
+ msr daifset, #DAIF_IRQ_BIT
+ ret
+
+
+func disable_fiq
+ msr daifset, #DAIF_FIQ_BIT
+ ret
+
+
+func disable_serror
+ msr daifset, #DAIF_ABT_BIT
+ ret
+
+
+func disable_debug_exceptions
+ msr daifset, #DAIF_DBG_BIT
+ ret
+
+
+func read_daif
+ mrs x0, daif
+ ret
+
+
+func write_daif
+ msr daif, x0
+ ret
+
+
+func read_spsr_el1
+ mrs x0, spsr_el1
+ ret
+
+
+func read_spsr_el2
+ mrs x0, spsr_el2
+ ret
+
+
+func read_spsr_el3
+ mrs x0, spsr_el3
+ ret
+
+
+func write_spsr_el1
+ msr spsr_el1, x0
+ ret
+
+
+func write_spsr_el2
+ msr spsr_el2, x0
+ ret
+
+
+func write_spsr_el3
+ msr spsr_el3, x0
+ ret
+
+
+func read_elr_el1
+ mrs x0, elr_el1
+ ret
+
+
+func read_elr_el2
+ mrs x0, elr_el2
+ ret
+
+
+func read_elr_el3
+ mrs x0, elr_el3
+ ret
+
+
+func write_elr_el1
+ msr elr_el1, x0
+ ret
+
+
+func write_elr_el2
+ msr elr_el2, x0
+ ret
+
+
+func write_elr_el3
+ msr elr_el3, x0
+ ret
+
+
+func dsb
+ dsb sy
+ ret
+
+
+func isb
+ isb
+ ret
+
+
+func sev
+ sev
+ ret
+
+
+func wfe
+ wfe
+ ret
+
+
+func wfi
+ wfi
+ ret
+
+
+func eret
+ eret
+
+
+func smc
+ smc #0
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem16
+ add x2, x0, x1
+/* zero 16 bytes at a time */
+z_loop16:
+ sub x3, x2, x0
+ cmp x3, #16
+ b.lt z_loop1
+ stp xzr, xzr, [x0], #16
+ b z_loop16
+/* zero byte per byte */
+z_loop1:
+ cmp x0, x2
+ b.eq z_end
+ strb wzr, [x0], #1
+ b z_loop1
+z_end: ret
+
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy16
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy byte per byte */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end: ret
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL3
+ * This is implemented in assembler to ensure that the data cache is cleaned
+ * and invalidated after the MMU is disabled without any intervening cacheable
+ * data accesses
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el3
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ mrs x0, sctlr_el3
+ bic x0, x0, x1
+ msr sctlr_el3, x0
+ isb // ensure MMU is off
+ mov x0, #DCCISW // DCache clean and invalidate
+ b dcsw_op_all
+
+
+func disable_mmu_icache_el3
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+
+func remap_zero_address
+ ldr x1, =0xc1300000
+ ldr w0, =0x00000001
+ str w0, [x1]
+ isb
+ dmb sy
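
A short sketch of driving zeromem16/memcpy16 from C; the prototypes mirror the comments above, and the 16-byte-aligned buffers are hypothetical:

#include <stdint.h>

extern void zeromem16(void *mem, unsigned int length);
extern void memcpy16(void *dest, const void *src, unsigned int length);

/* Both helpers require 16-byte-aligned addresses */
static uint64_t src_buf[32] __attribute__((aligned(16)));
static uint64_t dst_buf[32] __attribute__((aligned(16)));

void copy_example(void)
{
	zeromem16(dst_buf, sizeof(dst_buf));         /* zero 256 bytes */
	memcpy16(dst_buf, src_buf, sizeof(src_buf)); /* non-overlapping copy */
}
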
diff --git a/lib/aarch64/sysreg_helpers.S b/lib/aarch64/sysreg_helpers.S
new file mode 100644
index 0000000..925e93e
--- /dev/null
+++ b/lib/aarch64/sysreg_helpers.S
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl read_vbar_el1
+ .globl read_vbar_el2
+ .globl read_vbar_el3
+ .globl write_vbar_el1
+ .globl write_vbar_el2
+ .globl write_vbar_el3
+
+ .globl read_sctlr_el1
+ .globl read_sctlr_el2
+ .globl read_sctlr_el3
+ .globl write_sctlr_el1
+ .globl write_sctlr_el2
+ .globl write_sctlr_el3
+
+ .globl read_actlr_el1
+ .globl read_actlr_el2
+ .globl read_actlr_el3
+ .globl write_actlr_el1
+ .globl write_actlr_el2
+ .globl write_actlr_el3
+
+ .globl read_esr_el1
+ .globl read_esr_el2
+ .globl read_esr_el3
+ .globl write_esr_el1
+ .globl write_esr_el2
+ .globl write_esr_el3
+
+ .globl read_afsr0_el1
+ .globl read_afsr0_el2
+ .globl read_afsr0_el3
+ .globl write_afsr0_el1
+ .globl write_afsr0_el2
+ .globl write_afsr0_el3
+
+ .globl read_afsr1_el1
+ .globl read_afsr1_el2
+ .globl read_afsr1_el3
+ .globl write_afsr1_el1
+ .globl write_afsr1_el2
+ .globl write_afsr1_el3
+
+ .globl read_far_el1
+ .globl read_far_el2
+ .globl read_far_el3
+ .globl write_far_el1
+ .globl write_far_el2
+ .globl write_far_el3
+
+ .globl read_mair_el1
+ .globl read_mair_el2
+ .globl read_mair_el3
+ .globl write_mair_el1
+ .globl write_mair_el2
+ .globl write_mair_el3
+
+ .globl read_amair_el1
+ .globl read_amair_el2
+ .globl read_amair_el3
+ .globl write_amair_el1
+ .globl write_amair_el2
+ .globl write_amair_el3
+
+ .globl read_rvbar_el1
+ .globl read_rvbar_el2
+ .globl read_rvbar_el3
+
+ .globl read_rmr_el1
+ .globl read_rmr_el2
+ .globl read_rmr_el3
+ .globl write_rmr_el1
+ .globl write_rmr_el2
+ .globl write_rmr_el3
+
+ .globl read_tcr_el1
+ .globl read_tcr_el2
+ .globl read_tcr_el3
+ .globl write_tcr_el1
+ .globl write_tcr_el2
+ .globl write_tcr_el3
+
+ .globl read_cptr_el2
+ .globl read_cptr_el3
+ .globl write_cptr_el2
+ .globl write_cptr_el3
+
+ .globl read_ttbr0_el1
+ .globl read_ttbr0_el2
+ .globl read_ttbr0_el3
+ .globl write_ttbr0_el1
+ .globl write_ttbr0_el2
+ .globl write_ttbr0_el3
+
+ .globl read_ttbr1_el1
+ .globl write_ttbr1_el1
+
+ .globl read_cpacr
+ .globl write_cpacr
+
+ .globl read_cntfrq
+ .globl write_cntfrq
+
+ .globl read_cpuectlr
+ .globl write_cpuectlr
+
+ .globl read_cnthctl_el2
+ .globl write_cnthctl_el2
+
+ .globl read_cntfrq_el0
+ .globl write_cntfrq_el0
+
+ .globl read_cntps_ctl_el1
+ .globl write_cntps_ctl_el1
+
+ .globl read_cntps_cval_el1
+ .globl write_cntps_cval_el1
+
+ .globl read_cntps_tval_el1
+ .globl write_cntps_tval_el1
+
+ .globl read_scr
+ .globl write_scr
+
+ .globl read_hcr
+ .globl write_hcr
+
+ .globl read_midr
+ .globl read_mpidr
+
+ .globl read_cntpct_el0
+ .globl read_current_el
+ .globl read_id_pfr1_el1
+ .globl read_id_aa64pfr0_el1
+
+ .globl write_tpidr_el3
+ .globl read_tpidr_el3
+
+#if SUPPORT_VFP
+ .globl enable_vfp
+#endif
+
+
+func read_current_el
+ mrs x0, CurrentEl
+ ret
+
+
+func read_id_pfr1_el1
+ mrs x0, id_pfr1_el1
+ ret
+
+
+func read_id_aa64pfr0_el1
+ mrs x0, id_aa64pfr0_el1
+ ret
+
+
+ /* -----------------------------------------------------
+ * VBAR accessors
+ * -----------------------------------------------------
+ */
+func read_vbar_el1
+ mrs x0, vbar_el1
+ ret
+
+
+func read_vbar_el2
+ mrs x0, vbar_el2
+ ret
+
+
+func read_vbar_el3
+ mrs x0, vbar_el3
+ ret
+
+
+func write_vbar_el1
+ msr vbar_el1, x0
+ ret
+
+
+func write_vbar_el2
+ msr vbar_el2, x0
+ ret
+
+
+func write_vbar_el3
+ msr vbar_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * AFSR0 accessors
+ * -----------------------------------------------------
+ */
+func read_afsr0_el1
+ mrs x0, afsr0_el1
+ ret
+
+
+func read_afsr0_el2
+ mrs x0, afsr0_el2
+ ret
+
+
+func read_afsr0_el3
+ mrs x0, afsr0_el3
+ ret
+
+
+func write_afsr0_el1
+ msr afsr0_el1, x0
+ ret
+
+
+func write_afsr0_el2
+ msr afsr0_el2, x0
+ ret
+
+
+func write_afsr0_el3
+ msr afsr0_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * FAR accessors
+ * -----------------------------------------------------
+ */
+func read_far_el1
+ mrs x0, far_el1
+ ret
+
+
+func read_far_el2
+ mrs x0, far_el2
+ ret
+
+
+func read_far_el3
+ mrs x0, far_el3
+ ret
+
+
+func write_far_el1
+ msr far_el1, x0
+ ret
+
+
+func write_far_el2
+ msr far_el2, x0
+ ret
+
+
+func write_far_el3
+ msr far_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * MAIR accessors
+ * -----------------------------------------------------
+ */
+func read_mair_el1
+ mrs x0, mair_el1
+ ret
+
+
+func read_mair_el2
+ mrs x0, mair_el2
+ ret
+
+
+func read_mair_el3
+ mrs x0, mair_el3
+ ret
+
+
+func write_mair_el1
+ msr mair_el1, x0
+ ret
+
+
+func write_mair_el2
+ msr mair_el2, x0
+ ret
+
+
+func write_mair_el3
+ msr mair_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * AMAIR accessors
+ * -----------------------------------------------------
+ */
+func read_amair_el1
+ mrs x0, amair_el1
+ ret
+
+
+func read_amair_el2
+ mrs x0, amair_el2
+ ret
+
+
+func read_amair_el3
+ mrs x0, amair_el3
+ ret
+
+
+func write_amair_el1
+ msr amair_el1, x0
+ ret
+
+
+func write_amair_el2
+ msr amair_el2, x0
+ ret
+
+
+func write_amair_el3
+ msr amair_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * RVBAR accessors
+ * -----------------------------------------------------
+ */
+func read_rvbar_el1
+ mrs x0, rvbar_el1
+ ret
+
+
+func read_rvbar_el2
+ mrs x0, rvbar_el2
+ ret
+
+
+func read_rvbar_el3
+ mrs x0, rvbar_el3
+ ret
+
+
+ /* -----------------------------------------------------
+ * RMR accessors
+ * -----------------------------------------------------
+ */
+func read_rmr_el1
+ mrs x0, rmr_el1
+ ret
+
+
+func read_rmr_el2
+ mrs x0, rmr_el2
+ ret
+
+
+func read_rmr_el3
+ mrs x0, rmr_el3
+ ret
+
+
+func write_rmr_el1
+ msr rmr_el1, x0
+ ret
+
+
+func write_rmr_el2
+ msr rmr_el2, x0
+ ret
+
+
+func write_rmr_el3
+ msr rmr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * AFSR1 accessors
+ * -----------------------------------------------------
+ */
+func read_afsr1_el1
+ mrs x0, afsr1_el1
+ ret
+
+
+func read_afsr1_el2
+ mrs x0, afsr1_el2
+ ret
+
+
+func read_afsr1_el3
+ mrs x0, afsr1_el3
+ ret
+
+
+func write_afsr1_el1
+ msr afsr1_el1, x0
+ ret
+
+
+func write_afsr1_el2
+ msr afsr1_el2, x0
+ ret
+
+
+func write_afsr1_el3
+ msr afsr1_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * SCTLR accessors
+ * -----------------------------------------------------
+ */
+func read_sctlr_el1
+ mrs x0, sctlr_el1
+ ret
+
+
+func read_sctlr_el2
+ mrs x0, sctlr_el2
+ ret
+
+
+func read_sctlr_el3
+ mrs x0, sctlr_el3
+ ret
+
+
+func write_sctlr_el1
+ msr sctlr_el1, x0
+ ret
+
+
+func write_sctlr_el2
+ msr sctlr_el2, x0
+ ret
+
+
+func write_sctlr_el3
+ msr sctlr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * ACTLR accessors
+ * -----------------------------------------------------
+ */
+func read_actlr_el1
+ mrs x0, actlr_el1
+ ret
+
+
+func read_actlr_el2
+ mrs x0, actlr_el2
+ ret
+
+
+func read_actlr_el3
+ mrs x0, actlr_el3
+ ret
+
+
+func write_actlr_el1
+ msr actlr_el1, x0
+ ret
+
+
+func write_actlr_el2
+ msr actlr_el2, x0
+ ret
+
+
+func write_actlr_el3
+ msr actlr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * ESR accessors
+ * -----------------------------------------------------
+ */
+func read_esr_el1
+ mrs x0, esr_el1
+ ret
+
+
+func read_esr_el2
+ mrs x0, esr_el2
+ ret
+
+
+func read_esr_el3
+ mrs x0, esr_el3
+ ret
+
+
+func write_esr_el1
+ msr esr_el1, x0
+ ret
+
+
+func write_esr_el2
+ msr esr_el2, x0
+ ret
+
+
+func write_esr_el3
+ msr esr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * TCR accessors
+ * -----------------------------------------------------
+ */
+func read_tcr_el1
+ mrs x0, tcr_el1
+ ret
+
+
+func read_tcr_el2
+ mrs x0, tcr_el2
+ ret
+
+
+func read_tcr_el3
+ mrs x0, tcr_el3
+ ret
+
+
+func write_tcr_el1
+ msr tcr_el1, x0
+ ret
+
+
+func write_tcr_el2
+ msr tcr_el2, x0
+ ret
+
+
+func write_tcr_el3
+ msr tcr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * CPTR accessors
+ * -----------------------------------------------------
+ */
+func read_cptr_el2
+ mrs x0, cptr_el2
+ ret
+
+
+func read_cptr_el3
+ mrs x0, cptr_el3
+ ret
+
+
+func write_cptr_el2
+ msr cptr_el2, x0
+ ret
+
+
+func write_cptr_el3
+ msr cptr_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * TTBR0 accessors
+ * -----------------------------------------------------
+ */
+func read_ttbr0_el1
+ mrs x0, ttbr0_el1
+ ret
+
+
+func read_ttbr0_el2
+ mrs x0, ttbr0_el2
+ ret
+
+
+func read_ttbr0_el3
+ mrs x0, ttbr0_el3
+ ret
+
+
+func write_ttbr0_el1
+ msr ttbr0_el1, x0
+ ret
+
+
+func write_ttbr0_el2
+ msr ttbr0_el2, x0
+ ret
+
+
+func write_ttbr0_el3
+ msr ttbr0_el3, x0
+ ret
+
+
+ /* -----------------------------------------------------
+ * TTBR1 accessors
+ * -----------------------------------------------------
+ */
+func read_ttbr1_el1
+ mrs x0, ttbr1_el1
+ ret
+
+
+func write_ttbr1_el1
+ msr ttbr1_el1, x0
+ ret
+
+
+func read_hcr
+ mrs x0, hcr_el2
+ ret
+
+
+func write_hcr
+ msr hcr_el2, x0
+ ret
+
+
+func read_cpacr
+ mrs x0, cpacr_el1
+ ret
+
+
+func write_cpacr
+ msr cpacr_el1, x0
+ ret
+
+
+func read_cntfrq_el0
+ mrs x0, cntfrq_el0
+ ret
+
+
+func write_cntfrq_el0
+ msr cntfrq_el0, x0
+ ret
+
+func read_cntps_ctl_el1
+ mrs x0, cntps_ctl_el1
+ ret
+
+func write_cntps_ctl_el1
+ msr cntps_ctl_el1, x0
+ ret
+
+func read_cntps_cval_el1
+ mrs x0, cntps_cval_el1
+ ret
+
+func write_cntps_cval_el1
+ msr cntps_cval_el1, x0
+ ret
+
+func read_cntps_tval_el1
+ mrs x0, cntps_tval_el1
+ ret
+
+func write_cntps_tval_el1
+ msr cntps_tval_el1, x0
+ ret
+
+func read_cntpct_el0
+ mrs x0, cntpct_el0
+ ret
+
+func read_cpuectlr
+ mrs x0, CPUECTLR_EL1
+ ret
+
+
+func write_cpuectlr
+ msr CPUECTLR_EL1, x0
+ ret
+
+
+func read_cnthctl_el2
+ mrs x0, cnthctl_el2
+ ret
+
+
+func write_cnthctl_el2
+ msr cnthctl_el2, x0
+ ret
+
+
+func read_cntfrq
+ mrs x0, cntfrq_el0
+ ret
+
+
+func write_cntfrq
+ msr cntfrq_el0, x0
+ ret
+
+
+func write_scr
+ msr scr_el3, x0
+ ret
+
+
+func read_scr
+ mrs x0, scr_el3
+ ret
+
+
+func read_midr
+ mrs x0, midr_el1
+ ret
+
+
+func read_mpidr
+ mrs x0, mpidr_el1
+ ret
+
+func write_tpidr_el3
+ msr tpidr_el3, x0
+ ret
+
+func read_tpidr_el3
+ mrs x0, tpidr_el3
+ ret
+
+#if SUPPORT_VFP
+func enable_vfp
+ mrs x0, cpacr_el1
+ orr x0, x0, #CPACR_VFP_BITS
+ msr cpacr_el1, x0
+ mrs x0, cptr_el3
+ mov x1, #AARCH64_CPTR_TFP
+ bic x0, x0, x1
+ msr cptr_el3, x0
+ isb
+ ret
+
+#endif
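
As a usage sketch, these accessors are normally paired into read-modify-write sequences from C; the prototypes are assumed, and SCTLR_I_BIT below is a local stand-in for the constant the real code takes from arch.h:

extern unsigned long read_sctlr_el3(void);
extern void write_sctlr_el3(unsigned long val);
extern void isb(void);	/* from misc_helpers.S */

/* Stand-in for the arch.h definition: SCTLR_ELx.I is bit 12 on ARMv8 */
#define SCTLR_I_BIT	(1UL << 12)

void enable_el3_icache(void)
{
	unsigned long sctlr = read_sctlr_el3();

	write_sctlr_el3(sctlr | SCTLR_I_BIT);
	isb();	/* ensure the new SCTLR value takes effect */
}
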
diff --git a/lib/aarch64/tlb_helpers.S b/lib/aarch64/tlb_helpers.S
new file mode 100644
index 0000000..8dfae12
--- /dev/null
+++ b/lib/aarch64/tlb_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+ .globl tlbialle1
+ .globl tlbialle1is
+ .globl tlbialle2
+ .globl tlbialle2is
+ .globl tlbialle3
+ .globl tlbialle3is
+ .globl tlbivmalle1
+
+
+func tlbialle1
+ tlbi alle1
+ ret
+
+
+func tlbialle1is
+ tlbi alle1is
+ ret
+
+
+func tlbialle2
+ tlbi alle2
+ ret
+
+
+func tlbialle2is
+ tlbi alle2is
+ ret
+
+
+func tlbialle3
+ tlbi alle3
+ ret
+
+
+func tlbialle3is
+ tlbi alle3is
+ ret
+
+func tlbivmalle1
+ tlbi vmalle1
+ ret
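
A small sketch of the usual invalidate-then-synchronise pattern built from these helpers plus dsb()/isb() from misc_helpers.S; the extern prototypes are assumed:

extern void tlbialle3(void);
extern void dsb(void);
extern void isb(void);

void invalidate_el3_tlbs(void)
{
	tlbialle3();	/* invalidate all EL3 TLB entries */
	dsb();		/* wait for the invalidation to complete */
	isb();		/* synchronise the instruction stream */
}
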
diff --git a/lib/aarch64/xlat_helpers.c b/lib/aarch64/xlat_helpers.c
new file mode 100644
index 0000000..d401ffc
--- /dev/null
+++ b/lib/aarch64/xlat_helpers.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <assert.h>
+
+/*******************************************************************************
+ * Helper to create a level 1/2 table descriptor which points to a level 2/3
+ * table.
+ ******************************************************************************/
+unsigned long create_table_desc(unsigned long *next_table_ptr)
+{
+ unsigned long desc = (unsigned long) next_table_ptr;
+
+ /* Clear the last 12 bits */
+ desc >>= FOUR_KB_SHIFT;
+ desc <<= FOUR_KB_SHIFT;
+
+ desc |= TABLE_DESC;
+
+ return desc;
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to addr
+ ******************************************************************************/
+unsigned long create_block_desc(unsigned long desc,
+ unsigned long addr,
+ unsigned int level)
+{
+ switch (level) {
+ case LEVEL1:
+ desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC;
+ break;
+ case LEVEL2:
+ desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC;
+ break;
+ case LEVEL3:
+ desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC;
+ break;
+ default:
+ assert(0);
+ }
+
+ return desc;
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with Device nGnRE attributes.
+ ******************************************************************************/
+unsigned long create_device_block(unsigned long output_addr,
+ unsigned int level,
+ unsigned int ns)
+{
+ unsigned long upper_attrs, lower_attrs, desc;
+
+ lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW);
+ lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX);
+ upper_attrs = UPPER_ATTRS(XN);
+ desc = upper_attrs | lower_attrs;
+
+ return create_block_desc(desc, output_addr, level);
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with inner-shareable normal wbwa read-only memory attributes.
+ ******************************************************************************/
+unsigned long create_romem_block(unsigned long output_addr,
+ unsigned int level,
+ unsigned int ns)
+{
+ unsigned long upper_attrs, lower_attrs, desc;
+
+ lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO);
+ lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
+ upper_attrs = UPPER_ATTRS(0ull);
+ desc = upper_attrs | lower_attrs;
+
+ return create_block_desc(desc, output_addr, level);
+}
+
+/*******************************************************************************
+ * Helper to create a level 1/2/3 block descriptor which maps the va to output_
+ * addr with inner-shareable normal wbwa read-write memory attributes.
+ ******************************************************************************/
+unsigned long create_rwmem_block(unsigned long output_addr,
+ unsigned int level,
+ unsigned int ns)
+{
+ unsigned long upper_attrs, lower_attrs, desc;
+
+ lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW);
+ lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
+ upper_attrs = UPPER_ATTRS(XN);
+ desc = upper_attrs | lower_attrs;
+
+ return create_block_desc(desc, output_addr, level);
+}
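
Illustrative only: how the descriptor helpers above might be combined. LEVEL2 and the exact encoding of the address argument come from arch.h, which is not part of this diff, so the stand-in value here is an assumption:

extern unsigned long create_table_desc(unsigned long *next_table_ptr);
extern unsigned long create_device_block(unsigned long output_addr,
					 unsigned int level, unsigned int ns);

#define LEVEL2	2	/* stand-in; the real value comes from arch.h */

/* Point a level-1 entry at a level-2 table */
unsigned long example_l1_entry(unsigned long *l2_table)
{
	return create_table_desc(l2_table);
}

/* Map a hypothetical peripheral block as Device nGnRE at level 2 */
unsigned long example_uart_desc(unsigned long uart_block_addr, unsigned int ns)
{
	return create_device_block(uart_block_addr, LEVEL2, ns);
}
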
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
new file mode 100644
index 0000000..6e8df59
--- /dev/null
+++ b/lib/aarch64/xlat_tables.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <platform_def.h>
+#include <string.h>
+#include <xlat_tables.h>
+#include <stdio.h>
+#include <fip.h>
+
+#ifndef DEBUG_XLAT_TABLE
+#define DEBUG_XLAT_TABLE 0
+#endif
+
+#if DEBUG_XLAT_TABLE
+#define debug_print(...) printf(__VA_ARGS__)
+#else
+#define debug_print(...) ((void)0)
+#endif
+
+
+#define UNSET_DESC ~0ul
+
+#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
+__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
+
+static uint64_t **xlat_tables;
+//__aligned(XLAT_TABLE_SIZE) __attribute__((section("xlat_table")));
+
+static unsigned next_xlat;
+
+/*
+ * Array of all memory regions stored in order of ascending base address.
+ * The list is terminated by the first entry with size == 0.
+ */
+static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
+
+static void print_mmap(void)
+{
+#if DEBUG_XLAT_TABLE
+ debug_print("mmap:\n");
+ mmap_region_t *mm = mmap;
+ while (mm->size) {
+ debug_print(" %010lx %10lx %x\n", mm->base, mm->size, mm->attr);
+ ++mm;
+ };
+ debug_print("\n");
+#endif
+}
+
+void mmap_add_region(unsigned long base, unsigned long size, unsigned attr)
+{
+ mmap_region_t *mm = mmap;
+ mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;
+
+ assert(IS_PAGE_ALIGNED(base));
+ assert(IS_PAGE_ALIGNED(size));
+
+ if (!size)
+ return;
+
+ /* Find correct place in mmap to insert new region */
+ while (mm->base < base && mm->size)
+ ++mm;
+
+ /* Make room for new region by moving other regions up by one place */
+ memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
+
+ /* Check we haven't lost the empty sentinel from the end of the array */
+ assert(mm_last->size == 0);
+
+ mm->base = base;
+ mm->size = size;
+ mm->attr = attr;
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+ while (mm->size) {
+ mmap_add_region(mm->base, mm->size, mm->attr);
+ ++mm;
+ }
+}
+
+static unsigned long mmap_desc(unsigned attr, unsigned long addr,
+ unsigned level)
+{
+ unsigned long desc = addr;
+
+ desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;
+
+ desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;
+
+ desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ if (attr & MT_MEMORY) {
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ if (attr & MT_RW)
+ desc |= UPPER_ATTRS(XN);
+ } else {
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ desc |= UPPER_ATTRS(XN);
+ }
+
+ debug_print(attr & MT_MEMORY ? "MEM" : "DEV");
+ debug_print(attr & MT_RW ? "-RW" : "-RO");
+ debug_print(attr & MT_NS ? "-NS" : "-S");
+
+ return desc;
+}
+
+static int mmap_region_attr(mmap_region_t *mm, unsigned long base,
+ unsigned long size)
+{
+ int attr = mm->attr;
+
+ for (;;) {
+ ++mm;
+
+ if (!mm->size)
+ return attr; /* Reached end of list */
+
+ if (mm->base >= base + size)
+ return attr; /* Next region is after area so end */
+
+ if (mm->base + mm->size <= base)
+ continue; /* Next region has already been overtaken */
+
+ if ((mm->attr & attr) == attr)
+ continue; /* Region doesn't override attribs so skip */
+
+ attr &= mm->attr;
+
+ if (mm->base > base || mm->base + mm->size < base + size)
+ return -1; /* Region doesn't fully cover our area */
+ }
+}
+
+static mmap_region_t *init_xlation_table(mmap_region_t *mm, unsigned long base,
+ unsigned long *table, unsigned level)
+{
+ unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned level_size = 1 << level_size_shift;
+ unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;
+
+ assert(level <= 3);
+
+ debug_print("New xlat table:\n");
+
+ do {
+ unsigned long desc = UNSET_DESC;
+
+ if (mm->base + mm->size <= base) {
+ /* Area now after the region so skip it */
+ ++mm;
+ continue;
+ }
+
+ debug_print(" %010lx %8lx " + 6 - 2 * level, base, level_size);
+
+ if (mm->base >= base + level_size) {
+ /* Next region is after area so nothing to map yet */
+ desc = INVALID_DESC;
+ } else if (mm->base <= base &&
+ mm->base + mm->size >= base + level_size) {
+ /* Next region covers all of area */
+ int attr = mmap_region_attr(mm, base, level_size);
+ if (attr >= 0)
+ desc = mmap_desc(attr, base, level);
+ }
+ /* else Next region only partially covers area, so need finer table */
+
+ if (desc == UNSET_DESC) {
+ /* Area not covered by a region so need finer table */
+ unsigned long *new_table = xlat_tables[next_xlat++];
+ assert(next_xlat <= MAX_XLAT_TABLES);
+ desc = TABLE_DESC | (unsigned long)new_table;
+
+ /* Recurse to fill in new table */
+ mm = init_xlation_table(mm, base, new_table, level+1);
+ }
+
+ debug_print("\n");
+
+ *table++ = desc;
+ base += level_size;
+ } while (mm->size && (base & level_index_mask));
+
+ return mm;
+}
+
+void init_xlat_tables(void)
+{
+ /* move mmu table to ddr*/
+ memset((void *)MMU_TABLE_BASE, 0, MMU_TABLE_SIZE);
+ uint64_t * xlat_tables_x[MAX_XLAT_TABLES] = {0};
+ uint32_t loop = 0;
+ for (loop=0; loop<MAX_XLAT_TABLES; loop++) {
+ xlat_tables_x[loop] = (uint64_t *)(uint64_t)(MMU_TABLE_BASE + loop * MMU_TABLE_SIZE);
+ }
+ xlat_tables = xlat_tables_x;
+
+ print_mmap();
+ init_xlation_table(mmap, 0, l1_xlation_table, 1);
+}
+
+/*******************************************************************************
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ * _el: Exception level at which the function will run
+ * _tcr_extra: Extra bits to set in the TCR register. This mask will
+ * be OR'ed with the default TCR value.
+ * _tlbi_fct: Function to invalidate the TLBs at the current
+ * exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
+ void enable_mmu_el##_el(void) \
+ { \
+ uint64_t mair, tcr, ttbr; \
+ uint32_t sctlr; \
+ \
+ assert(IS_IN_EL(_el)); \
+ assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
+ \
+ /* Set attributes in the right indices of the MAIR */ \
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
+ ATTR_IWBWA_OWBWA_NTR_INDEX); \
+ write_mair_el##_el(mair); \
+ \
+ /* Invalidate TLBs at the current exception level */ \
+ _tlbi_fct(); \
+ \
+ /* Set TCR bits as well. */ \
+ /* Inner & outer WBWA & shareable + T0SZ = 32 */ \
+ tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
+ TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; \
+ tcr |= _tcr_extra; \
+ write_tcr_el##_el(tcr); \
+ \
+ /* Set TTBR bits as well */ \
+ ttbr = (uint64_t) l1_xlation_table; \
+ write_ttbr0_el##_el(ttbr); \
+ \
+ /* Ensure all translation table writes have drained */ \
+ /* into memory, the TLB invalidation is complete, */ \
+ /* and translation register writes are committed */ \
+ /* before enabling the MMU */ \
+ dsb(); \
+ isb(); \
+ \
+ sctlr = read_sctlr_el##_el(); \
+ sctlr |= SCTLR_WXN_BIT | SCTLR_I_BIT; \
+ sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; \
+ write_sctlr_el##_el(sctlr); \
+ \
+ /* Ensure the MMU enable takes effect immediately */ \
+ isb(); \
+ }
+
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
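
Finally, a hedged end-to-end sketch of how this translation-table API is typically driven at boot. The addresses and sizes are placeholders; MT_MEMORY and MT_RW are the attribute flags referenced by mmap_desc() above, while MT_DEVICE is assumed to be the non-MT_MEMORY case defined in xlat_tables.h (not shown in this diff):

/* MT_MEMORY, MT_DEVICE and MT_RW come from xlat_tables.h */
extern void mmap_add_region(unsigned long base, unsigned long size,
			    unsigned attr);
extern void init_xlat_tables(void);
extern void enable_mmu_el3(void);	/* generated by DEFINE_ENABLE_MMU_EL(3, ...) */

void platform_setup_mmu(void)
{
	/* Hypothetical layout: on-chip SRAM as normal memory, MMIO as device */
	mmap_add_region(0x00000000UL, 0x00080000UL, MT_MEMORY | MT_RW);
	mmap_add_region(0xc0000000UL, 0x10000000UL, MT_DEVICE | MT_RW);

	init_xlat_tables();	/* builds l1_xlation_table and the sub-tables */
	enable_mmu_el3();	/* programs MAIR/TCR/TTBR0_EL3, then sets SCTLR_EL3.M */
}
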