/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

#include <common/asm.h>
#include <arch/machine/registers.h>

/* TCR_ELx.TxSZ: a region of size 2^(64 - x) bytes per TTBR. */
#define TCR_T0SZ(x)     ((64 - (x)))
#define TCR_T1SZ(x)     ((64 - (x)) << 16)
#define TCR_TxSZ(x)     (TCR_T0SZ(x) | TCR_T1SZ(x))

/* Inner cacheability for translation-table walks (IRGN0/IRGN1). */
#define TCR_IRGN0_WBWC  (1 << 8)
#define TCR_IRGN_NC     ((0 << 8) | (0 << 24))
#define TCR_IRGN_WBWA   ((1 << 8) | (1 << 24))
#define TCR_IRGN_WT     ((2 << 8) | (2 << 24))
#define TCR_IRGN_WBnWA  ((3 << 8) | (3 << 24))
#define TCR_IRGN_MASK   ((3 << 8) | (3 << 24))

/* Outer cacheability for translation-table walks (ORGN0/ORGN1). */
#define TCR_ORGN0_WBWC  (1 << 10)
#define TCR_ORGN_NC     ((0 << 10) | (0 << 26))
#define TCR_ORGN_WBWA   ((1 << 10) | (1 << 26))
#define TCR_ORGN_WT     ((2 << 10) | (2 << 26))
#define TCR_ORGN_WBnWA  ((3 << 10) | (3 << 26))
#define TCR_ORGN_MASK   ((3 << 10) | (3 << 26))

#define TCR_SH0_ISH     (3 << 12)

/* Translation granule sizes for TTBR0 (TG0) and TTBR1 (TG1). */
#define TCR_TG0_4K      (0 << 14)
#define TCR_TG0_64K     (1 << 14)
#define TCR_TG1_4K      (2 << 30)
#define TCR_TG1_64K     (3 << 30)

/* Physical address size (PS). */
#define TCR_PS_4G       (0 << 16)
#define TCR_PS_64G      (1 << 16)
#define TCR_PS_1T       (2 << 16)
#define TCR_PS_4T       (3 << 16)
#define TCR_PS_16T      (4 << 16)
#define TCR_PS_256T     (5 << 16)

/* These bits are reserved as 1 (RES1). */
#define TCR_EL2_RES1    ((1 << 23) | (1 << 31))
#define TCR_ASID16      (1 << 36)

#define UL(x)           x##UL

#define TCR_SH0_SHIFT   12
#define TCR_SH0_MASK    (UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER   (UL(3) << TCR_SH0_SHIFT)
#define TCR_SH1_SHIFT   28
#define TCR_SH1_MASK    (UL(3) << TCR_SH1_SHIFT)
#define TCR_SH1_INNER   (UL(3) << TCR_SH1_SHIFT)

#define TCR_SHARED      (TCR_SH0_INNER | TCR_SH1_INNER)

#define TCR_TBI0        (UL(1) << 37)
#define TCR_A1          (UL(1) << 22)

/* MAIR_EL1 attribute indices and the per-index encoding helper. */
#define MT_DEVICE_nGnRnE  0
#define MT_DEVICE_nGnRE   1
#define MT_DEVICE_GRE     2
#define MT_NORMAL_NC      3
#define MT_NORMAL         4
#define MAIR(_attr, _mt)  ((_attr) << ((_mt) * 8))

#define CURRENTEL_EL1   (0b01 << 2)
#define CURRENTEL_EL2   (0b10 << 2)

#define CPACR_EL1_FPEN      (0b11 << 20)
#define ID_AA64PFR0_EL1_GIC (0b1111 << 24)

#define CNTHCTL_EL2_EL1PCEN  (1 << 1)
#define CNTHCTL_EL2_EL1PCTEN (1 << 0)
#define CPTR_EL2_RES1        0x33ff
#define HCR_EL2_RW           (1 << 31)
#define ICC_SRE_EL2_SRE      (1 << 0)
#define ICC_SRE_EL2_ENABLE   (1 << 3)

#define SCR_EL3_HCE     (1 << 8)
#define SCR_EL3_NS      (1 << 0)
#define SCR_EL3_RW      (1 << 10)

#define SPSR_ELX_DAIF   (0b1111 << 6)
#define SPSR_ELX_EL1H   (0b0101)

#define ICH_HCR_EL2     S3_4_C12_C11_0
#define ICC_SRE_EL2     S3_4_C12_C9_5

/*
 * Walk every data/unified cache level reported by CLIDR_EL1 and apply
 * "dc \op" to every set/way. This is the standard set/way maintenance
 * loop from the Arm Architecture Reference Manual.
 */
.macro dcache op
    dsb sy
    mrs x0, clidr_el1
    and x3, x0, #0x7000000      /* x3 = CLIDR.LoC (bits [26:24]) */
    lsr x3, x3, #23             /* x3 = LoC * 2 */

    cbz x3, finished_\op        /* no cache levels to maintain */
    mov x10, #0                 /* x10 = current level << 1 */

loop1_\op:
    add x2, x10, x10, lsr #1    /* x2 = 3 * level: shift of CLIDR ctype field */
    lsr x1, x0, x2
    and x1, x1, #7              /* x1 = cache type at this level */
    cmp x1, #2
    b.lt skip_\op               /* skip if no data/unified cache at this level */

    msr csselr_el1, x10         /* select this cache level */
    isb                         /* make CSSELR visible to CCSIDR read */

    mrs x1, ccsidr_el1
    and x2, x1, #7
    add x2, x2, #4              /* x2 = log2(cache line size in bytes) */
    mov x4, #0x3ff
    and x4, x4, x1, lsr #3      /* x4 = number of ways - 1 */
    clz w5, w4                  /* x5 = bit position of the way field */
    mov x7, #0x7fff
    and x7, x7, x1, lsr #13     /* x7 = number of sets - 1 */

loop2_\op:                      /* iterate over sets */
    mov x9, x4                  /* reload the way counter */

loop3_\op:                      /* iterate over ways */
    lsl x6, x9, x5              /* x6 = way index << way shift */
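    /*
     * The next three instructions assemble the set/way operand for
     * "dc \op": the way index goes in the top bits (shift in x5), the
     * set index is shifted by log2(line size) (x2), and the cache
     * level (already shifted left by 1) is held in x10.
     */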
    orr x11, x10, x6            /* combine level and way */
    lsl x6, x7, x2
    orr x11, x11, x6            /* combine with set */
    dc \op, x11                 /* maintain this line by set/way */
    subs x9, x9, #1             /* next way */
    b.ge loop3_\op
    subs x7, x7, #1             /* next set */
    b.ge loop2_\op

skip_\op:
    add x10, x10, #2            /* next cache level */
    cmp x3, x10
    b.gt loop1_\op

finished_\op:
    mov x10, #0
    msr csselr_el1, x10         /* restore CSSELR to level 0 */
    dsb sy
    isb
.endm

.macro enable_mmu sctlr tmp
    mrs \tmp, \sctlr
    /* Enable MMU */
    orr \tmp, \tmp, #SCTLR_EL1_M
    /* Disable alignment checking */
    bic \tmp, \tmp, #SCTLR_EL1_A
    bic \tmp, \tmp, #SCTLR_EL1_SA0
    bic \tmp, \tmp, #SCTLR_EL1_SA
    orr \tmp, \tmp, #SCTLR_EL1_nAA
    /* Data accesses Cacheable */
    orr \tmp, \tmp, #SCTLR_EL1_C
    /* Instruction accesses Cacheable */
    orr \tmp, \tmp, #SCTLR_EL1_I
    msr \sctlr, \tmp
    isb
.endm

.macro disable_mmu sctlr tmp
    mrs \tmp, \sctlr
    /* Disable MMU */
    bic \tmp, \tmp, #SCTLR_EL1_M
    /* Disable alignment checking */
    bic \tmp, \tmp, #SCTLR_EL1_A
    bic \tmp, \tmp, #SCTLR_EL1_SA0
    bic \tmp, \tmp, #SCTLR_EL1_SA
    orr \tmp, \tmp, #SCTLR_EL1_nAA
    /* Disable data cache */
    bic \tmp, \tmp, #SCTLR_EL1_C
    /* Disable instruction cache */
    bic \tmp, \tmp, #SCTLR_EL1_I
    msr \sctlr, \tmp
    isb
.endm

BEGIN_FUNC(invalidate_dcache)
    dcache isw
    ret
END_FUNC(invalidate_dcache)

BEGIN_FUNC(invalidate_icache)
    ic iallu
    dsb nsh
    isb
    ret
END_FUNC(invalidate_icache)

.extern boot_ttbr0_l0
.extern boot_ttbr1_l0

BEGIN_FUNC(el1_mmu_activate)
    /* We call nested functions, so follow the ABI and save a frame. */
    stp x29, x30, [sp, #-16]!
    mov x29, sp

    bl invalidate_dcache

    /* Ensure I-cache, D-cache and MMU are disabled for EL1/Stage 1 */
    disable_mmu sctlr_el1, x8

    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively are discarded.
     */
    bl invalidate_icache

    /*
     * Memory attributes, one byte per MAIR index:
     *
     *   DEVICE_nGnRnE   000   00000000
     *   DEVICE_nGnRE    001   00000100
     *   DEVICE_GRE      010   00001100
     *   NORMAL_NC       011   01000100
     *   NORMAL          100   11111111
     */
    ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
             MAIR(0x04, MT_DEVICE_nGnRE) | \
             MAIR(0x0c, MT_DEVICE_GRE) | \
             MAIR(0x44, MT_NORMAL_NC) | \
             MAIR(0xff, MT_NORMAL)

    msr mair_el1, x5

    /* 48-bit VA in both halves, 4K granule, WBWA cacheable walks. */
    ldr x10, =TCR_TxSZ(48) | TCR_IRGN_WBWA | TCR_ORGN_WBWA | \
              TCR_TG0_4K | TCR_TG1_4K | TCR_ASID16 | TCR_SHARED

    /* Copy the supported PA range (ID_AA64MMFR0_EL1.PARange) into TCR_EL1.IPS. */
    mrs x9, ID_AA64MMFR0_EL1
    bfi x10, x9, #32, #3
    msr tcr_el1, x10

    /* Set up page tables */
    adrp x8, boot_ttbr0_l0
    msr ttbr0_el1, x8
    adrp x8, boot_ttbr1_l0
    msr ttbr1_el1, x8
    isb

    /* Invalidate all TLB entries for EL1 */
    tlbi vmalle1is
    dsb ish
    isb

    enable_mmu sctlr_el1, x8

    ldp x29, x30, [sp], #16
    ret
END_FUNC(el1_mmu_activate)
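/*
 * Calling-convention note (a sketch; the authoritative prototypes live
 * in the corresponding C headers): the three routines exported above
 * take no arguments and return nothing, i.e.
 *
 *     void invalidate_dcache(void);
 *     void invalidate_icache(void);
 *     void el1_mmu_activate(void);
 *
 * el1_mmu_activate expects to be entered at EL1 with a valid stack (it
 * saves x29/x30) and with boot_ttbr0_l0/boot_ttbr1_l0 already filled in
 * by the boot path; it returns with the MMU, D-cache and I-cache enabled.
 */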