/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <sys/types.h>
#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

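/*
 * Return the TCR.{I}PS field value that covers the given maximum physical
 * address, so that the physical address range enabled in the TCR is as
 * small as possible.
 */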
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48-bit address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44-bit address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42-bit address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40-bit address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36-bit address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
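
/*
 * For example, a platform whose highest physical address is 0x3FFFFFFFF
 * (a 34-bit address) only has bits set in the 32-35 range, so the function
 * above returns TCR_PS_BITS_64GB, i.e. a 36-bit (64 GB) PA range.
 */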

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

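/*
 * Return the maximum physical address supported by the hardware, as
 * reported by the PARange field of ID_AA64MMFR0_EL1.
 */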
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */

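/*
 * Return 1 if the MMU of the translation regime managed by the given
 * context is enabled, 0 otherwise.
 */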
int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
	}
}

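/*
 * Invalidate all TLB entries that match the given virtual address in the
 * translation regime of the current image.
 */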
void xlat_arch_tlbi_va(uintptr_t va)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
#endif
}

void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the EL3
	 * and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1);
		tlbivaae1is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3);
		tlbivae3is(TLBI_ADDR(va));
	}
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
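
/*
 * Typical usage of the two helpers above when modifying a live translation
 * table (a minimal sketch; "table", "idx" and "va" are illustrative names,
 * the actual descriptor manipulation lives elsewhere in this library):
 *
 *	table[idx] = INVALID_DESC;	// remove the mapping
 *	xlat_arch_tlbi_va(va);		// invalidate cached copies
 *	...				// optionally batch more updates
 *	xlat_arch_tlbi_va_sync();	// wait for completion before reuse
 */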

int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the page tables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
	static void enable_mmu_internal_el##_el(int flags,		\
						uint64_t mair,		\
						uint64_t tcr,		\
						uint64_t ttbr)		\
	{								\
		uint32_t sctlr = read_sctlr_el##_el();			\
		assert((sctlr & SCTLR_M_BIT) == 0);			\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		write_mair_el##_el(mair);				\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		if (ARM_ARCH_AT_LEAST(8, 2)) {				\
			/* Enable CnP bit so as to share page tables */	\
			/* with all PEs. This is mandatory for */	\
			/* ARMv8.2 implementations. */			\
			ttbr |= TTBR_CNP_BIT;				\
		}							\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3, tlbialle3)
#endif
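
/*
 * For instance, with IMAGE_EL == 3 the invocation above expands to a
 * function named enable_mmu_internal_el3() that uses tlbialle3() to
 * invalidate all EL3 TLB entries before programming the EL3 registers.
 */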
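/*
 * Configure MAIR, TCR and TTBR0 for the translation regime of the current
 * image, using base_table as the base of the translation tables, and then
 * enable the MMU with the given flags.
 */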
void enable_mmu_arch(unsigned int flags,
		uint64_t *base_table,
		unsigned long long max_pa,
		uintptr_t max_va)
{
	uint64_t mair, ttbr, tcr;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	ttbr = (uint64_t) base_table;

	/*
	 * Set TCR bits as well.
	 */

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < UINTPTR_MAX);
	uintptr_t virtual_addr_space_size = max_va + 1;
	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
	/*
	 * __builtin_ctzll(0) is undefined, but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
	 */
	tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
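	/*
	 * The value computed above is TCR.T0SZ: the architecture defines
	 * the TTBR0 region size as 2^(64 - T0SZ) bytes, and the assertion
	 * above guarantees that virtual_addr_space_size is a power of two.
	 * For example, a 1 TiB (2^40 byte) virtual address space gives
	 * T0SZ = 64 - 40 = 24.
	 */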

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the maximum physical address accessible by
	 * the hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	/*
	 * TCR_EL1.EPD1: Disable translation table walks for addresses that
	 * are translated using TTBR1_EL1.
	 */
	tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
#endif
}
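
/*
 * enable_mmu_arch() is not meant to be called directly by platform code.
 * A typical flow through the xlat tables v2 library looks roughly like
 * this (a minimal sketch; the exact wrappers, e.g. enable_mmu_el3(), live
 * in other files of this library):
 *
 *	mmap_add_region(base_pa, base_va, size, MT_MEMORY | MT_RW);
 *	init_xlat_tables();	// build the tables, compute max_pa/max_va
 *	enable_mmu_el3(0);	// ends up calling enable_mmu_arch()
 */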