author    Ferass El Hafidi <vitali64pmemail@protonmail.com>  2023-05-08 19:03:10 +0200
committer Ferass El Hafidi <vitali64pmemail@protonmail.com>  2023-05-08 19:03:10 +0200
commit    f9ed707f171c8069e99e24e24c3da73d8b6f5716 (patch)
tree      4da9838d387c8bc260e83f3f51f5dfa83e0b48ae /lib/aarch64/cache_helpers.S
Push old Amlogic BL2 sources (HEAD, master)
Diffstat (limited to 'lib/aarch64/cache_helpers.S')
-rw-r--r--  lib/aarch64/cache_helpers.S  245
1 file changed, 245 insertions, 0 deletions
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
new file mode 100644
index 0000000..04caac4
--- /dev/null
+++ b/lib/aarch64/cache_helpers.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl dcisw
+ .globl dccisw
+ .globl dccsw
+ .globl dccvac
+ .globl dcivac
+ .globl dccivac
+ .globl dccvau
+ .globl dczva
+ .globl flush_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+
+ .global _clean_dcache_addr
+ .global _clean_invd_dcache_addr
+ .globl platform_stack_set_bl2
+
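+	/* -----------------------------------------------------------
+	 * void platform_stack_set_bl2(uintptr_t stack_top)
+	 * Switch the current stack pointer to the address passed in
+	 * x0, preserving the link register in x9 across the update so
+	 * the caller can still be returned to.
+	 * -----------------------------------------------------------
+	 */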
+ .type platform_stack_set_bl2, @function
+platform_stack_set_bl2:
+	mov	x9, x30			// save lr before switching sp
+ mov sp, x0
+ ret x9
+
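+	/* -----------------------------------------------------------
+	 * Single-instruction wrappers around the DC maintenance
+	 * operations. x0 holds the operand: a virtual address for the
+	 * cvac/ivac/civac/cvau/zva variants, or a set/way/level
+	 * descriptor for the isw/cisw/csw variants. No barriers are
+	 * issued here; callers must add any DSB/ISB they need.
+	 * -----------------------------------------------------------
+	 */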
+func dcisw
+ dc isw, x0
+ ret
+
+
+func dccisw
+ dc cisw, x0
+ ret
+
+
+func dccsw
+ dc csw, x0
+ ret
+
+
+func dccvac
+ dc cvac, x0
+ ret
+
+
+func dcivac
+ dc ivac, x0
+ ret
+
+
+func dccivac
+ dc civac, x0
+ ret
+
+
+func dccvau
+ dc cvau, x0
+ ret
+
+
+func dczva
+ dc zva, x0
+ ret
+
+
+	/* ------------------------------------------
+	 * Clean and invalidate the data cache over
+	 * the range [addr, addr + size).
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_range
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+flush_loop:
+ dc civac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo flush_loop
+ dsb sy
+ ret
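+
+	/* ------------------------------------------
+	 * Illustrative C equivalent of the loop
+	 * above (a reader's sketch, not part of the
+	 * build; cache_line_size(), dc_civac() and
+	 * dsb_sy() are hypothetical stand-ins for
+	 * the dcache_line_size macro and the DC
+	 * CIVAC / DSB SY instructions):
+	 *
+	 *   void flush_dcache_range(uintptr_t addr, size_t size)
+	 *   {
+	 *       uintptr_t end = addr + size;
+	 *       size_t line = cache_line_size();
+	 *       addr &= ~(line - 1);        // align down to a line
+	 *       for (; addr < end; addr += line)
+	 *           dc_civac(addr);         // clean+invalidate by VA
+	 *       dsb_sy();                   // wait for completion
+	 *   }
+	 * ------------------------------------------
+	 */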
+
+
+	/* ------------------------------------------
+	 * Invalidate the data cache over the range
+	 * [addr, addr + size).
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func inv_dcache_range
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+inv_loop:
+ dc ivac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo inv_loop
+ dsb sy
+ ret
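+
+	/* ------------------------------------------
+	 * Typical usage from C (illustrative only;
+	 * buf and len are hypothetical):
+	 *
+	 *   // CPU wrote a buffer a device will read:
+	 *   flush_dcache_range((uintptr_t)buf, len);
+	 *   // Device wrote a buffer the CPU will read:
+	 *   inv_dcache_range((uintptr_t)buf, len);
+	 * ------------------------------------------
+	 */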
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op, requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ mov x10, xzr
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
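+	/* ---------------------------------------------------------------
+	 * Rough C rendering of the set/way walk above (a reader's sketch,
+	 * not part of the build; read_clidr/read_ccsidr/write_csselr and
+	 * the dc_op callback are hypothetical stand-ins for the system
+	 * register accesses and the DC ISW/CISW/CSW instruction):
+	 *
+	 *   void do_dcsw_op_sketch(void (*dc_op)(unsigned long),
+	 *                          unsigned int last_level_x2)
+	 *   {
+	 *       unsigned long clidr = read_clidr();
+	 *       for (unsigned int l = 0; l < last_level_x2; l += 2) {
+	 *           unsigned int ctype = (clidr >> (3 * (l / 2))) & 7;
+	 *           if (ctype < 2)
+	 *               continue;           // no cache or I-cache only
+	 *           write_csselr(l);        // select D/unified cache
+	 *           unsigned long ccsidr = read_ccsidr();
+	 *           unsigned int line_shift = (ccsidr & 7) + 4;
+	 *           int max_way = (ccsidr >> 3) & 0x3ff;
+	 *           int max_set = (ccsidr >> 13) & 0x7fff;
+	 *           // as in the asm, assumes max_way > 0 for the CLZ trick
+	 *           unsigned int way_shift = __builtin_clz(max_way);
+	 *           for (int way = max_way; way >= 0; way--)
+	 *               for (int set = max_set; set >= 0; set--)
+	 *                   dc_op(((unsigned long)way << way_shift) |
+	 *                         ((unsigned long)set << line_shift) | l);
+	 *       }
+	 *   }
+	 * ---------------------------------------------------------------
+	 */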
+
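+	/* ---------------------------------------------------------------
+	 * dcsw_op_louis operates up to the Level of Unification Inner
+	 * Shareable (CLIDR_EL1.LoUIS); dcsw_op_all operates up to the
+	 * Level of Coherency (CLIDR_EL1.LoC). x0 selects the operation
+	 * type, as for do_dcsw_op.
+	 * ---------------------------------------------------------------
+	 */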
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+
+#if 0
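+// This block is compiled out and kept for reference only, although the
+// two helpers are still declared .global above.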
+//==============================================
+// void _clean_dcache_addr(unsigned long addr)
+// clean dcache by VA
+//==============================================
+ .type _clean_dcache_addr, @function
+_clean_dcache_addr:
+ dc cvac, x0
+ dsb sy
+ ret
+//==============================================
+// void _clean_invd_dcache_addr(unsigned long addr)
+// clean&invalid dcache by VA
+//==============================================
+ .type _clean_invd_dcache_addr, @function
+_clean_invd_dcache_addr:
+ dc civac, x0
+ dsb sy
+ ret
+#endif