/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

#include <common/asm.h>
#include <arch/machine/registers.h>

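/* CurrentEL reports the current exception level in bits [3:2] (0b01 = EL1, 0b10 = EL2). */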
#define CURRENTEL_EL1           (0b01 << 2)
#define CURRENTEL_EL2           (0b10 << 2)

#define CPACR_EL1_FPEN          (0b11 << 20)
#define ID_AA64PFR0_EL1_GIC     (0b1111 << 24)

#define CNTHCTL_EL2_EL1PCEN     (1 << 1)
#define CNTHCTL_EL2_EL1PCTEN    (1 << 0)
#define CPTR_EL2_RES1           0x33ff
#define HCR_EL2_RW              (1 << 31)
#define ICC_SRE_EL2_SRE         (1 << 0)
#define ICC_SRE_EL2_ENABLE      (1 << 3)

#define SCR_EL3_HCE             (1 << 8)
#define SCR_EL3_NS              (1 << 0)
#define SCR_EL3_RW              (1 << 10)

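/*
 * SPSR value written before eret: mask all of DAIF (bits [9:6]) and
 * select EL1 using SP_EL1 (EL1h, M[3:0] = 0b0101).
 */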
#define SPSR_ELX_DAIF           (0b1111 << 6)
#define SPSR_ELX_EL1H           (0b0101)

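/*
 * Generic system-register encodings (S<op0>_<op1>_<CRn>_<CRm>_<op2>) for the
 * GICv3 hypervisor registers, for assemblers that do not know them by name.
 */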
#define ICH_HCR_EL2             S3_4_C12_C11_0
#define ICC_SRE_EL2             S3_4_C12_C9_5

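/*
 * arm64_elX_to_el1: drop the core to EL1h regardless of the exception level
 * it booted in. If already in EL1, return immediately. When booted in EL3,
 * first mark EL2 as non-secure AArch64 with HVC enabled and set up
 * ELR_EL3/SPSR_EL3, then fall through to the EL2 setup below. The EL2 setup
 * disables the EL1 timer and coprocessor traps, turns off stage-2
 * translation, enables the GICv3 system-register interface when present,
 * and finally erets to .Ltarget running in EL1 with interrupts masked.
 */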
BEGIN_FUNC(arm64_elX_to_el1)
	mrs x9, CurrentEL

	// Check the current exception level.
	cmp x9, CURRENTEL_EL1
	beq .Ltarget
	cmp x9, CURRENTEL_EL2
	beq .Lin_el2
	// Otherwise, we are in EL3.

	// Set EL2 to 64bit and enable the HVC instruction.
	mrs x9, scr_el3
	mov x10, SCR_EL3_NS | SCR_EL3_HCE | SCR_EL3_RW
	orr x9, x9, x10
	msr scr_el3, x9

	// Set the return address and exception level.
	adr x9, .Ltarget
	msr elr_el3, x9
	mov x9, SPSR_ELX_DAIF | SPSR_ELX_EL1H
	msr spsr_el3, x9

.Lin_el2:
	// Disable EL1 timer traps and the timer offset.
	mrs x9, cnthctl_el2
	orr x9, x9, CNTHCTL_EL2_EL1PCEN | CNTHCTL_EL2_EL1PCTEN
	msr cnthctl_el2, x9
	msr cntvoff_el2, xzr

	// Disable stage 2 translations.
	msr vttbr_el2, xzr

	// Disable EL2 coprocessor traps.
	mov x9, CPTR_EL2_RES1
	msr cptr_el2, x9

	// Disable EL1 FPU traps.
	mov x9, CPACR_EL1_FPEN
	msr cpacr_el1, x9

	// Check whether the GIC system registers are supported.
	mrs x9, id_aa64pfr0_el1
	and x9, x9, ID_AA64PFR0_EL1_GIC
	cbz x9, .Lno_gic_sr

	// Enable the GIC system registers in EL2, and allow their use in EL1.
	mrs x9, ICC_SRE_EL2
	mov x10, ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE
	orr x9, x9, x10
	msr ICC_SRE_EL2, x9

	// Disable the GIC virtual CPU interface.
	msr ICH_HCR_EL2, xzr

.Lno_gic_sr:
	// Set EL1 to 64bit.
	mov x9, HCR_EL2_RW
	msr hcr_el2, x9

	// Set the return address and exception level.
	adr x9, .Ltarget
	msr elr_el2, x9
	mov x9, SPSR_ELX_DAIF | SPSR_ELX_EL1H
	msr spsr_el2, x9

	isb
	eret

.Ltarget:
	ret
END_FUNC(arm64_elX_to_el1)

// See https://developer.arm.com/documentation/den0024/a/Caches/Cache-maintenance
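//
// invalidate_cache_all: invalidate the entire data/unified cache hierarchy
// by set/way, then the instruction cache. CLIDR_EL1 gives the level of
// coherence; for each data or unified level up to it, CCSIDR_EL1 (selected
// through CSSELR_EL1) gives the line size and the number of ways and sets,
// and a "dc isw" is issued for every set/way combination at that level.
//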
BEGIN_FUNC(invalidate_cache_all)
	mrs     x0, clidr_el1
	and     w3, w0, #0x07000000     // get 2x level of coherence
	lsr     w3, w3, #23
	cbz     w3, .Lfinished_inv_cache
	mov     w10, #0                 // w10 = 2x cache level
	mov     w8, #1                  // w8 = constant 1
.Lloop1_inv_cache:
	add     w2, w10, w10, lsr #1    // calculate 3x cache level
	lsr     w1, w0, w2              // extract 3-bit cache type for this level
	and     w1, w1, #0x7
	cmp     w1, #2
	b.lt    .Lskip_inv_cache        // no data or unified cache at this level
	msr     csselr_el1, x10         // select this cache level
	isb                             // synchronize change to csselr
	mrs     x1, ccsidr_el1          // w1 = ccsidr
	and     w2, w1, #7              // w2 = log2(line len) - 4
	add     w2, w2, #4              // w2 = log2(line len)
	ubfx    w4, w1, #3, #10         // w4 = max way number, right aligned
	clz     w5, w4                  // w5 = 32 - log2(ways), bit position of way in DC operand
	lsl     w9, w4, w5              // w9 = max way number, aligned to position in DC operand
	lsl     w12, w8, w5             // w12 = amount to decrement way number per iteration

.Lloop2_inv_cache:
	ubfx    w7, w1, #13, #15        // w7 = max set number, right aligned
	lsl     w7, w7, w2              // w7 = max set number, aligned to position in DC operand
	lsl     w13, w8, w2             // w13 = amount to decrement set number per iteration
.Lloop3_inv_cache:
	orr     w11, w10, w9            // w11 = combine way number and cache number
	orr     w11, w11, w7            //       and set number for DC operand
	dc      isw, x11                // data cache op
	subs    w7, w7, w13             // decrement set number
	b.ge    .Lloop3_inv_cache

	subs    x9, x9, x12             // decrement way number
	b.ge    .Lloop2_inv_cache
.Lskip_inv_cache:
	add     w10, w10, #2            // increment 2x cache level
	cmp     w3, w10
	dsb     sy                      // ensure completion of previous cache maintenance instructions
	b.gt    .Lloop1_inv_cache
.Lfinished_inv_cache:

	// invalidate the instruction cache as well
	ic      iallu
	isb
	ret
END_FUNC(invalidate_cache_all)

.extern boot_ttbr0_l0
.extern boot_ttbr1_l0

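/*
 * MAIR_EL1 holds eight 8-bit memory attribute fields; ATTRn below fills
 * attribute index n, which page-table entries select via their AttrIndx bits.
 */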
/* DEVICE_nGnRnE */
#define MMU_MAIR_ATTR0          (0x00 << (8 * 0))

/* DEVICE_nGnRE */
#define MMU_MAIR_ATTR1          (0x04 << (8 * 1))

/* DEVICE_GRE */
#define MMU_MAIR_ATTR2          (0x0c << (8 * 2))

/* NORMAL_NC */
#define MMU_MAIR_ATTR3          (0x44 << (8 * 3))

/* NORMAL */
#define MMU_MAIR_ATTR4          (0xff << (8 * 4))

/*
 * Enable cached page table walks:
 * inner/outer (IRGN/ORGN): write-back + write-allocate
 */
#define MMU_TCR_TG1_4k          (0 << 14)
#define MMU_TCR_SH1_INNER_SH    (3 << 28)
#define MMU_TCR_ORGN1_WBA       (1 << 26)
#define MMU_TCR_IRGN1_WBA       (1 << 24)
#define MMU_TCR_T1SZ            ((64 - 48) << 16) /* 48-bit virtual addresses */
#define MMU_TCR_FLAGS1          (MMU_TCR_TG1_4k | MMU_TCR_SH1_INNER_SH | \
                                 MMU_TCR_ORGN1_WBA | MMU_TCR_IRGN1_WBA | MMU_TCR_T1SZ)

#define MMU_TCR_TG0_4k          (0 << 30)
#define MMU_TCR_SH0_INNER_SH    (3 << 12)
#define MMU_TCR_ORGN0_WBA       (1 << 10)
#define MMU_TCR_IRGN0_WBA       (1 << 8)
#define MMU_TCR_T0SZ            ((64 - 48) << 0) /* 48-bit virtual addresses */
#define MMU_TCR_FLAGS0          (MMU_TCR_TG0_4k | MMU_TCR_SH0_INNER_SH | \
                                 MMU_TCR_ORGN0_WBA | MMU_TCR_IRGN0_WBA | MMU_TCR_T0SZ)
#define MMU_TCR_IPS             (0b101 << 32) /* 48-bit physical addresses */
#define MMU_TCR_AS              (1 << 36) /* 16-bit ASIDs */

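/*
 * el1_mmu_activate: invalidate all caches and the EL1 TLBs, program
 * MAIR_EL1 and TCR_EL1, install the boot page tables (boot_ttbr0_l0 /
 * boot_ttbr1_l0) into TTBR0_EL1/TTBR1_EL1, then enable the MMU together
 * with the data and instruction caches in SCTLR_EL1.
 */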
BEGIN_FUNC(el1_mmu_activate)
	stp     x29, x30, [sp, #-16]!
	mov     x29, sp

	bl      invalidate_cache_all

	/* Invalidate TLB */
	tlbi    vmalle1is
	isb
	dsb     sy

	/* Initialize Memory Attribute Indirection Register */
	ldr     x8, =MMU_MAIR_ATTR0 | MMU_MAIR_ATTR1 | MMU_MAIR_ATTR2 | MMU_MAIR_ATTR3 | MMU_MAIR_ATTR4
	msr     mair_el1, x8

	/* Initialize TCR_EL1 */
	/* set cacheable attributes on translation walk */
	/* inner shareable, inner write-back write-allocate */
	ldr     x8, =MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0 | MMU_TCR_IPS | MMU_TCR_AS
	msr     tcr_el1, x8
	isb

	/* Write ttbr with phys addr of the translation table */
	adrp    x8, boot_ttbr0_l0
	msr     ttbr0_el1, x8
	adrp    x8, boot_ttbr1_l0
	msr     ttbr1_el1, x8
	isb

	mrs     x8, sctlr_el1
	/* Enable MMU */
	orr     x8, x8, #SCTLR_EL1_M
	/* Disable alignment checking (data accesses and EL0/EL1 SP) */
	bic     x8, x8, #SCTLR_EL1_A
	bic     x8, x8, #SCTLR_EL1_SA0
	bic     x8, x8, #SCTLR_EL1_SA
	orr     x8, x8, #SCTLR_EL1_nAA
	/* Data accesses Cacheable */
	orr     x8, x8, #SCTLR_EL1_C
	/* Instruction access Cacheable */
	orr     x8, x8, #SCTLR_EL1_I
	msr     sctlr_el1, x8

	ldp     x29, x30, [sp], #16
	ret
END_FUNC(el1_mmu_activate)