/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

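/*
 * Add a static memory region to the default translation context, tf_xlat_ctx.
 * Static regions can only be added before init_xlat_tables() is called.
 */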
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

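/*
 * Add an array of static regions to the default translation context. The
 * array must be terminated by a zero-filled entry. For example (illustrative
 * addresses and attributes only):
 *
 *	const mmap_region_t plat_mmap[] = {
 *		MAP_REGION_FLAT(0x80000000ULL, 0x100000U,
 *				MT_MEMORY | MT_RW | MT_SECURE),
 *		{0}
 *	};
 *	mmap_add(plat_mmap);
 */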
void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

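/*
 * Add a static region to the default translation context, letting the library
 * pick the virtual address. The allocated VA is returned through *base_va.
 */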
void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

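/*
 * Add an array of static regions, allocating a virtual address for each one.
 * The array is terminated by an entry whose granularity is zero; the VA chosen
 * for each region is written back into its base_va field.
 */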
void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

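/*
 * Map a region into the default translation context at runtime. Returns 0 on
 * success or an error code if the region cannot be mapped. For example
 * (illustrative values only; dev_pa and dev_va are placeholders):
 *
 *	int ret = mmap_add_dynamic_region(dev_pa, dev_va, PAGE_SIZE,
 *					  MT_DEVICE | MT_RW | MT_SECURE);
 */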
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

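/*
 * Same as mmap_add_dynamic_region(), but the virtual address is allocated by
 * the library and returned through *base_va.
 */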
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

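/*
 * Unmap a dynamic region previously added to the default translation context,
 * identified by its base virtual address and size.
 */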
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

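/*
 * Initialise the translation tables of the default context for the regime of
 * the exception level this image runs at. Must be called before the MMU is
 * enabled.
 */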
void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

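/*
 * Query the attributes of the memory page mapped at base_va in the default
 * translation context and return them through *attr.
 */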
int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

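/*
 * Change the attributes of an already mapped memory range in the default
 * translation context.
 */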
int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

#if PLAT_RO_XLAT_TABLES
/*
 * Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by
 * default mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken-and-egg problem because of the break-before-make sequence.
 * Eventually, the update would reach the descriptor that resolves the very
 * table it belongs to, and the invalidation (break step) would cause the
 * subsequent write (make step) to generate an MMU fault. Therefore, the MMU
 * is disabled before making the change.
 *
 * No assumption is made about what data this function needs, so all the
 * caches are flushed in order to ensure coherency. A future optimization
 * would be to flush only the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
	assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		disable_mmu_el1();
	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
		disable_mmu_el3();
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
		return -1;
	}

	/* Flush all caches. */
	dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
	/*
	 * On AArch32, we flush the caches before disabling the MMU. The
	 * reason is that the AArch32 dcsw_op_all function pushes some
	 * registers onto the stack under the assumption that it is writing
	 * to cache, which is not true with the MMU off. This would corrupt
	 * the stack and restore a wrong/junk value into the LR at the end
	 * of the routine.
	 */
	dcsw_op_all(DC_OP_CISW);
	disable_mmu_secure();
#endif

	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
				(uintptr_t)tf_xlat_ctx.tables,
				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
				MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		enable_mmu_el1(0U);
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
		enable_mmu_el3(0U);
	}
#else /* !__aarch64__ */
	enable_mmu_svc_mon(0U);
#endif

	if (rc == 0) {
		tf_xlat_ctx.readonly_tables = true;
	}

	return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */

/*
 * If dynamic allocation of new regions is disabled then, by the time we call
 * the function enabling the MMU, we'll have registered all the memory regions
 * to map for the system's lifetime. Therefore, at this point we know the
 * maximum physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could grow as new regions are added.
 * Therefore, in this case we have to assume that the whole address space
 * size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef __aarch64__

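/*
 * The enable_mmu_el*() helpers fill mmu_cfg_params for the requested
 * translation regime via setup_mmu_cfg() and then call the corresponding
 * enable_mmu_direct_el*() assembly routine, which programs the system
 * registers and turns the MMU on.
 */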
void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

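/*
 * Enable the MMU at the exception level the image is currently running at,
 * or panic if that level is not EL1, EL2 or EL3.
 */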
void enable_mmu(unsigned int flags)
{
	switch (get_current_el_maybe_constant()) {
	case 1:
		enable_mmu_el1(flags);
		break;
	case 2:
		enable_mmu_el2(flags);
		break;
	case 3:
		enable_mmu_el3(flags);
		break;
	default:
		panic();
	}
}

#else /* !__aarch64__ */

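/*
 * AArch32 counterpart of enable_mmu_el1(): configure and enable the MMU for
 * the EL1&0 translation regime, used from Secure SVC/Monitor modes.
 */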
void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

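/*
 * AArch32 counterpart of enable_mmu_el2(): configure and enable the MMU for
 * the EL2 translation regime, used from Hyp mode.
 */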
void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */