// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 MediaTek Inc. All rights reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/

#include <image.h>
#include <asm/system.h>
#include <asm/sections.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/cm.h>
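
/*
 * The CACHE op encodings in <asm/cacheops.h> follow (op << 2) | cache,
 * where cache 3 selects the secondary (L2) cache. 0x0f is therefore
 * op 3, "Index Store Data", which writes the contents of the
 * L23DataLo/L23DataHi CP0 registers into the indexed line. The header
 * does not provide this op, hence the local definition below.
 */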
#define INDEX_STORE_DATA_SD 0x0f

typedef void __noreturn (*image_entry_noargs_t)(void);
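
/*
 * DRAM is not yet usable this early in the boot flow, so the locked L2
 * cache lines below serve as a temporary on-chip RAM: the next-stage
 * image is filled into L2 lines that are locked so they cannot be
 * evicted before the image gets a chance to run.
 */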

/*
 * Fill data into L2 cache lines and lock them in place
 * Assumes data is 4-byte aligned and start_addr/size are 32-byte aligned
 */
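
/*
 * The tag written below is (addr & 0x1ffff800) | 0xa0: the mask is
 * presumably the physical-tag field for MT7621's 256 KiB L2, while
 * 0xa0 presumably sets the valid-state and lock bits (MIPS32 *TagLo
 * keeps the line state in bits [7:6] and the lock bit in bit 5).
 */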
static void fill_lock_l2cache(uintptr_t dataptr, ulong start_addr, ulong size)
{
	ulong slsize = CONFIG_SYS_DCACHE_LINE_SIZE;
	ulong end_addr = start_addr + size;
	const u32 *data = (u32 *)dataptr;
	ulong i, addr;
	u32 val;

	/* Clear the WST and SPR bits in ErrCtl */
	val = read_c0_ecc();
	val &= 0xcfffffff;
	write_c0_ecc(val);
	execution_hazard_barrier();

	for (addr = start_addr; addr < end_addr; addr += slsize) {
		/* Set STagLo to lock the cache line */
		write_c0_staglo((addr & 0x1ffff800) | 0xa0);
		mips_cache(INDEX_STORE_TAG_SD, (void *)addr);

		/* Fill the line, one 8-byte DataLo/DataHi pair at a time */
		for (i = 0; i < slsize; i += 8) {
			val = *data++;
			__write_32bit_c0_register($28, 5, val); /* sdatalo */
			val = *data++;
			__write_32bit_c0_register($29, 5, val); /* sdatahi */
			mips_cache(INDEX_STORE_DATA_SD, (void *)(addr + i));
		}
	}

	sync();
}

/* A simple function to initialize MT7621's caches */
static void mt7621_cache_init(void)
{
	void __iomem *cm_base = (void *)KSEG1ADDR(CONFIG_MIPS_CM_BASE);
	ulong lsize = CONFIG_SYS_DCACHE_LINE_SIZE;
	ulong addr;
	u32 val;

	/* Enable CCA override. Set to uncached */
	val = readl(cm_base + GCR_BASE);
	val &= ~CCA_DEFAULT_OVR_MASK;
	val |= CCA_DEFAULT_OVREN | (2 << CCA_DEFAULT_OVR_SHIFT);
	writel(val, cm_base + GCR_BASE);
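
	/*
	 * CCA 2 is the architecture-defined "uncached" attribute, so from
	 * here until the override is dropped, every access bypasses the
	 * still-uninitialized caches.
	 */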

	/* Initialize L1 I-Cache: zero all tags */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = 0; addr < CONFIG_SYS_ICACHE_SIZE; addr += lsize)
		mips_cache(INDEX_STORE_TAG_I, (void *)addr);

	/* Initialize L1 D-Cache */
	write_c0_dtaglo(0);
	__write_32bit_c0_register($29, 2, 0); /* dtaghi */

	for (addr = 0; addr < CONFIG_SYS_DCACHE_SIZE; addr += lsize)
		mips_cache(INDEX_STORE_TAG_D, (void *)addr);

	/* Initialize L2 Cache (256 KiB on MT7621) */
	write_c0_staglo(0);
	__write_32bit_c0_register($29, 4, 0); /* staghi */

	for (addr = 0; addr < (256 << 10); addr += lsize)
		mips_cache(INDEX_STORE_TAG_SD, (void *)addr);
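
	/*
	 * Index Store Tag with TagLo/TagHi cleared writes an invalid tag
	 * into every line, putting all three caches into a known state
	 * without causing any writebacks.
	 */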

	/* Disable CCA override */
	val = readl(cm_base + GCR_BASE);
	val &= ~(CCA_DEFAULT_OVR_MASK | CCA_DEFAULT_OVREN);
	writel(val, cm_base + GCR_BASE);

	/* Set KSEG0 to non-coherent cached (important!) */
	val = read_c0_config();
	val &= ~CONF_CM_CMASK;
	val |= CONF_CM_CACHABLE_NONCOHERENT;
	write_c0_config(val);
	execution_hazard_barrier();

	/* Writeback & invalidate L1 D-Cache now that KSEG0 is cached */
	for (addr = 0; addr < CONFIG_SYS_DCACHE_SIZE; addr += lsize)
		mips_cache(INDEX_WRITEBACK_INV_D, (void *)addr);

	/* Invalidate L1 I-Cache */
	for (addr = 0; addr < CONFIG_SYS_ICACHE_SIZE; addr += lsize)
		mips_cache(INDEX_INVALIDATE_I, (void *)addr);

	/* Disable L2 cache bypass */
	val = read_c0_config2();
	val &= ~MIPS_CONF_IMPL;
	write_c0_config2(val);
	execution_hazard_barrier();
}
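
/*
 * The next-stage image is expected to be concatenated right after the
 * TPL binary (at __image_copy_end), wrapped in a legacy uImage header
 * carrying its load address, size and entry point:
 *
 *   [ TPL binary ][ legacy_img_hdr ][ next-stage payload ]
 *                 ^ __image_copy_end
 */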

void __noreturn tpl_main(void)
{
	const struct legacy_img_hdr *hdr = (const struct legacy_img_hdr *)__image_copy_end;
	image_entry_noargs_t image_entry;
	u32 loadaddr, size;
	uintptr_t data;

	/* Initialize the caches first */
	mt7621_cache_init();

	if (image_get_magic(hdr) != IH_MAGIC)
		goto failed;

	loadaddr = image_get_load(hdr);
	size = image_get_size(hdr);
	image_entry = (image_entry_noargs_t)image_get_ep(hdr);

	/* Load the next-stage (SPL) image into the locked L2 cache */
	data = (uintptr_t)__image_copy_end + sizeof(struct legacy_img_hdr);
	fill_lock_l2cache(data, loadaddr, size);

	/* Jump to SPL */
	image_entry();

failed:
	/* No valid image found; hang */
	for (;;)
		;
}