1 /*
2  * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <common_def.h>
11 #include <debug.h>
12 #include <errno.h>
13 #include <platform_def.h>
14 #include <string.h>
15 #include <types.h>
16 #include <utils.h>
17 #include <xlat_tables_arch_private.h>
18 #include <xlat_tables_defs.h>
19 #include <xlat_tables_v2.h>
20 
21 #include "xlat_tables_private.h"
22 
23 /*
24  * Each platform can define the size of its physical and virtual address spaces.
25  * If the platform hasn't defined one or both of them, default to
26  * ADDR_SPACE_SIZE. The latter is deprecated, though.
27  */
28 #if ERROR_DEPRECATED
29 # ifdef ADDR_SPACE_SIZE
30 #  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
31 # endif
32 #elif defined(ADDR_SPACE_SIZE)
33 # ifndef PLAT_PHY_ADDR_SPACE_SIZE
34 #  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
35 # endif
36 # ifndef PLAT_VIRT_ADDR_SPACE_SIZE
37 #  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
38 # endif
39 #endif
40 
41 /*
42  * Allocate and initialise the default translation context for the BL image
43  * currently executing.
44  */
45 REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
46 		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
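/*
 * Illustrative sketch only: the values consumed above come from the platform
 * port, typically platform_def.h. The macro names below are the real build
 * options, but the numbers are hypothetical examples, not recommendations:
 *
 *     #define PLAT_PHY_ADDR_SPACE_SIZE   (1ULL << 32)
 *     #define PLAT_VIRT_ADDR_SPACE_SIZE  (1ULL << 32)
 *     #define MAX_MMAP_REGIONS           16
 *     #define MAX_XLAT_TABLES            8
 *
 * REGISTER_XLAT_CONTEXT then statically allocates the mmap array, the base
 * translation table and MAX_XLAT_TABLES sub-tables for the 'tf' context.
 */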
47 
48 #if PLAT_XLAT_TABLES_DYNAMIC
49 
50 /*
51  * The following functions assume that they will be called using subtables only.
52  * The base table can't be unmapped, so no special handling is needed
53  * for it.
54  */
55 
56 /*
57  * Returns the index into ctx->tables of the specified translation
58  * table.
59  */
60 static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
61 {
62 	for (unsigned int i = 0; i < ctx->tables_num; i++)
63 		if (ctx->tables[i] == table)
64 			return i;
65 
66 	/*
67 	 * Maybe we were asked to get the index of the base level table, which
68 	 * should never happen.
69 	 */
70 	assert(0);
71 
72 	return -1;
73 }
74 
75 /* Returns a pointer to an empty translation table. */
76 static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
77 {
78 	for (unsigned int i = 0; i < ctx->tables_num; i++)
79 		if (ctx->tables_mapped_regions[i] == 0)
80 			return ctx->tables[i];
81 
82 	return NULL;
83 }
84 
85 /* Increments region count for a given table. */
86 static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
87 {
88 	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
89 }
90 
91 /* Decrements region count for a given table. */
92 static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
93 {
94 	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
95 }
96 
97 /* Returns 1 if the specified table is empty, 0 otherwise. */
98 static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
99 {
100 	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
101 }
102 
103 #else /* PLAT_XLAT_TABLES_DYNAMIC */
104 
105 /* Returns a pointer to the first empty translation table. */
106 static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
107 {
108 	assert(ctx->next_table < ctx->tables_num);
109 
110 	return ctx->tables[ctx->next_table++];
111 }
112 
113 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
114 
115 /*
116  * Returns a block/page table descriptor for the given level and attributes.
117  */
118 uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
119 		   unsigned long long addr_pa, int level)
120 {
121 	uint64_t desc;
122 	int mem_type;
123 
124 	/* Make sure that the granularity is fine enough to map this address. */
125 	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
126 
127 	desc = addr_pa;
128 	/*
129 	 * There are different translation table descriptors for level 3 and the
130 	 * rest.
131 	 */
132 	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
133 	/*
134 	 * Always set the access flag, as TF doesn't manage access flag faults.
135 	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
136 	 * memory region attributes.
137 	 */
138 	desc |= LOWER_ATTRS(ACCESS_FLAG);
139 
140 	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
141 	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
142 
143 	/*
144 	 * Do not allow unprivileged access when the mapping is for a privileged
145 	 * EL. For translation regimes that have no mappings for lower exception
146 	 * levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
147 	 */
148 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
149 		if (attr & MT_USER) {
150 			/* EL0 mapping requested, so we give User access */
151 			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
152 		} else {
153 			/* EL1 mapping requested, no User access granted */
154 			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
155 		}
156 	} else {
157 		assert(ctx->xlat_regime == EL3_REGIME);
158 		desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
159 	}
160 
161 	/*
162 	 * Deduce shareability domain and executability of the memory region
163 	 * from the memory type of the attributes (MT_TYPE).
164 	 *
165 	 * Data accesses to device memory and non-cacheable normal memory are
166 	 * coherent for all observers in the system, and correspondingly are
167 	 * always treated as being Outer Shareable. Therefore, for these 2 types
168 	 * of memory, it is not strictly needed to set the shareability field
169 	 * in the translation tables.
170 	 */
171 	mem_type = MT_TYPE(attr);
172 	if (mem_type == MT_DEVICE) {
173 		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
174 		/*
175 		 * Always map device memory as execute-never.
176 		 * This is to avoid the possibility of a speculative instruction
177 		 * fetch, which could be an issue if this memory region
178 		 * corresponds to a read-sensitive peripheral.
179 		 */
180 		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
181 
182 	} else { /* Normal memory */
183 		/*
184 		 * Always map read-write normal memory as execute-never.
185 		 * (Trusted Firmware doesn't self-modify its code, therefore
186 		 * R/W memory is reserved for data storage, which must not be
187 		 * executable.)
188 		 * Note that setting the XN bit here is for consistency only.
189 		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
190 		 * which causes any writable memory region to be treated as
191 		 * execute-never, regardless of the value of the XN bit in the
192 		 * translation table.
193 		 *
194 		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
195 		 * attribute to figure out the value of the XN bit.  The actual
196 		 * XN bit(s) to set in the descriptor depends on the context's
197 		 * translation regime and the policy applied in
198 		 * xlat_arch_regime_get_xn_desc().
199 		 */
200 		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
201 			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
202 		}
203 
204 		if (mem_type == MT_MEMORY) {
205 			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
206 		} else {
207 			assert(mem_type == MT_NON_CACHEABLE);
208 			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
209 		}
210 	}
211 
212 	return desc;
213 }
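/*
 * Worked example (illustrative only; the address is hypothetical): a call
 * such as
 *
 *     desc = xlat_desc(ctx, MT_MEMORY | MT_RW | MT_NS, 0x80000000ULL, 2);
 *
 * follows the paths above and produces a level 2 block descriptor containing
 * BLOCK_DESC | ACCESS_FLAG | AP_RW | NS | ATTR_IWBWA_OWBWA_NTR_INDEX | ISH
 * (the low attributes wrapped in LOWER_ATTRS()), the AP[1] setting that
 * matches the translation regime, plus the execute-never bits returned by
 * xlat_arch_regime_get_xn_desc() because the mapping is writable.
 */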
214 
215 /*
216  * Enumeration of actions that can be taken when mapping table entries depending
217  * on the previous value in that entry and information about the region being
218  * mapped.
219  */
220 typedef enum {
221 
222 	/* Do nothing */
223 	ACTION_NONE,
224 
225 	/* Write a block (or page, if in level 3) entry. */
226 	ACTION_WRITE_BLOCK_ENTRY,
227 
228 	/*
229 	 * Create a new table and write a table entry pointing to it. Recurse
230 	 * into it for further processing.
231 	 */
232 	ACTION_CREATE_NEW_TABLE,
233 
234 	/*
235 	 * There is a table descriptor in this entry, read it and recurse into
236 	 * that table for further processing.
237 	 */
238 	ACTION_RECURSE_INTO_TABLE,
239 
240 } action_t;
241 
242 #if PLAT_XLAT_TABLES_DYNAMIC
243 
244 /*
245  * Recursive function that writes to the translation tables and unmaps the
246  * specified region.
247  */
248 static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
249 				     const uintptr_t table_base_va,
250 				     uint64_t *const table_base,
251 				     const int table_entries,
252 				     const unsigned int level)
253 {
254 	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
255 
256 	uint64_t *subtable;
257 	uint64_t desc;
258 
259 	uintptr_t table_idx_va;
260 	uintptr_t table_idx_end_va; /* End VA of this entry */
261 
262 	uintptr_t region_end_va = mm->base_va + mm->size - 1;
263 
264 	int table_idx;
265 
266 	if (mm->base_va > table_base_va) {
267 		/* Find the first index of the table affected by the region. */
268 		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
269 
270 		table_idx = (table_idx_va - table_base_va) >>
271 			    XLAT_ADDR_SHIFT(level);
272 
273 		assert(table_idx < table_entries);
274 	} else {
275 		/* Start from the beginning of the table. */
276 		table_idx_va = table_base_va;
277 		table_idx = 0;
278 	}
279 
280 	while (table_idx < table_entries) {
281 
282 		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
283 
284 		desc = table_base[table_idx];
285 		uint64_t desc_type = desc & DESC_MASK;
286 
287 		action_t action = ACTION_NONE;
288 
289 		if ((mm->base_va <= table_idx_va) &&
290 		    (region_end_va >= table_idx_end_va)) {
291 
292 			/* Region covers the whole block */
293 
294 			if (level == 3) {
295 				/*
296 				 * Last level, only page descriptors allowed,
297 				 * erase it.
298 				 */
299 				assert(desc_type == PAGE_DESC);
300 
301 				action = ACTION_WRITE_BLOCK_ENTRY;
302 			} else {
303 				/*
304 				 * Other levels can have table descriptors. If
305 				 * so, recurse into it and erase descriptors
306 				 * inside it as needed. If there is a block
307 				 * descriptor, just erase it. If an invalid
308 				 * descriptor is found, this table isn't
309 				 * actually mapped, which shouldn't happen.
310 				 */
311 				if (desc_type == TABLE_DESC) {
312 					action = ACTION_RECURSE_INTO_TABLE;
313 				} else {
314 					assert(desc_type == BLOCK_DESC);
315 					action = ACTION_WRITE_BLOCK_ENTRY;
316 				}
317 			}
318 
319 		} else if ((mm->base_va <= table_idx_end_va) ||
320 			   (region_end_va >= table_idx_va)) {
321 
322 			/*
323 			 * Region partially covers block.
324 			 *
325 			 * It can't happen in level 3.
326 			 *
327 			 * There must be a table descriptor here, if not there
328 			 * was a problem when mapping the region.
329 			 */
330 
331 			assert(level < 3);
332 
333 			assert(desc_type == TABLE_DESC);
334 
335 			action = ACTION_RECURSE_INTO_TABLE;
336 		}
337 
338 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
339 
340 			table_base[table_idx] = INVALID_DESC;
341 			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
342 
343 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
344 
345 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
346 
347 			/* Recurse to write into subtable */
348 			xlat_tables_unmap_region(ctx, mm, table_idx_va,
349 						 subtable, XLAT_TABLE_ENTRIES,
350 						 level + 1);
351 
352 			/*
353 			 * If the subtable is now empty, remove its reference.
354 			 */
355 			if (xlat_table_is_empty(ctx, subtable)) {
356 				table_base[table_idx] = INVALID_DESC;
357 				xlat_arch_tlbi_va_regime(table_idx_va,
358 						ctx->xlat_regime);
359 			}
360 
361 		} else {
362 			assert(action == ACTION_NONE);
363 		}
364 
365 		table_idx++;
366 		table_idx_va += XLAT_BLOCK_SIZE(level);
367 
368 		/* If reached the end of the region, exit */
369 		if (region_end_va <= table_idx_va)
370 			break;
371 	}
372 
373 	if (level > ctx->base_level)
374 		xlat_table_dec_regions_count(ctx, table_base);
375 }
376 
377 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
378 
379 /*
380  * From the given arguments, it decides which action to take when mapping the
381  * specified region.
382  */
383 static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
384 		const int desc_type, const unsigned long long dest_pa,
385 		const uintptr_t table_entry_base_va, const unsigned int level)
386 {
387 	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
388 	uintptr_t table_entry_end_va =
389 			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
390 
391 	/*
392 	 * The descriptor types allowed depend on the current table level.
393 	 */
394 
395 	if ((mm->base_va <= table_entry_base_va) &&
396 	    (mm_end_va >= table_entry_end_va)) {
397 
398 		/*
399 		 * Table entry is covered by region
400 		 * --------------------------------
401 		 *
402 		 * This means that this table entry can describe the whole
403 		 * translation with this granularity in principle.
404 		 */
405 
406 		if (level == 3) {
407 			/*
408 			 * Last level, only page descriptors are allowed.
409 			 */
410 			if (desc_type == PAGE_DESC) {
411 				/*
412 				 * There's another region mapped here, don't
413 				 * overwrite.
414 				 */
415 				return ACTION_NONE;
416 			} else {
417 				assert(desc_type == INVALID_DESC);
418 				return ACTION_WRITE_BLOCK_ENTRY;
419 			}
420 
421 		} else {
422 
423 			/*
424 			 * Other levels. Table descriptors are allowed. Block
425 			 * descriptors too, but they have some limitations.
426 			 */
427 
428 			if (desc_type == TABLE_DESC) {
429 				/* There's already a table, recurse into it. */
430 				return ACTION_RECURSE_INTO_TABLE;
431 
432 			} else if (desc_type == INVALID_DESC) {
433 				/*
434 				 * There's nothing mapped here, create a new
435 				 * entry.
436 				 *
437 				 * Check if the destination granularity allows
438 				 * us to use a block descriptor or we need a
439 				 * finer table for it.
440 				 *
441 				 * Also, check if the current level allows block
442 				 * descriptors. If not, create a table instead.
443 				 */
444 				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
445 				    (level < MIN_LVL_BLOCK_DESC) ||
446 				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
447 					return ACTION_CREATE_NEW_TABLE;
448 				else
449 					return ACTION_WRITE_BLOCK_ENTRY;
450 
451 			} else {
452 				/*
453 				 * There's another region mapped here, don't
454 				 * overwrite.
455 				 */
456 				assert(desc_type == BLOCK_DESC);
457 
458 				return ACTION_NONE;
459 			}
460 		}
461 
462 	} else if ((mm->base_va <= table_entry_end_va) ||
463 		   (mm_end_va >= table_entry_base_va)) {
464 
465 		/*
466 		 * Region partially covers table entry
467 		 * -----------------------------------
468 		 *
469 		 * This means that this table entry can't describe the whole
470 		 * translation; a finer table is needed.
471 		 *
472 		 * There cannot be partial block overlaps in level 3. If that
473 		 * happens, some of the preliminary checks when adding the
474 		 * mmap region failed to detect that PA and VA must at least be
475 		 * aligned to PAGE_SIZE.
476 		 */
477 		assert(level < 3);
478 
479 		if (desc_type == INVALID_DESC) {
480 			/*
481 			 * The block is not fully covered by the region. Create
482 			 * a new table, recurse into it and try to map the
483 			 * region with finer granularity.
484 			 */
485 			return ACTION_CREATE_NEW_TABLE;
486 
487 		} else {
488 			assert(desc_type == TABLE_DESC);
489 			/*
490 			 * The block is not fully covered by the region, but
491 			 * there is already a table here. Recurse into it and
492 			 * try to map with finer granularity.
493 			 *
494 			 * PAGE_DESC for level 3 has the same value as
495 			 * TABLE_DESC, but this code can't run on a level 3
496 			 * table because there can't be overlaps in level 3.
497 			 */
498 			return ACTION_RECURSE_INTO_TABLE;
499 		}
500 	}
501 
502 	/*
503 	 * This table entry is outside of the region specified in the arguments,
504 	 * don't write anything to it.
505 	 */
506 	return ACTION_NONE;
507 }
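/*
 * Example of the decision above (sizes assume a 4 KiB granule and are given
 * for illustration only): while mapping a 2 MiB-aligned, 2 MiB-sized region,
 * the level 2 entry that it fully covers gets ACTION_WRITE_BLOCK_ENTRY (a
 * single block descriptor). A 4 KiB region only partially covers that same
 * level 2 entry, so the entry gets ACTION_CREATE_NEW_TABLE (or
 * ACTION_RECURSE_INTO_TABLE if a sub-table already exists) and the page
 * descriptor is written at level 3.
 */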
508 
509 /*
510  * Recursive function that writes to the translation tables and maps the
511  * specified region. On success, it returns the VA of the last byte that was
512  * successfully mapped. On error, it returns the VA of the next entry that
513  * should have been mapped.
514  */
515 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
516 				   const uintptr_t table_base_va,
517 				   uint64_t *const table_base,
518 				   const int table_entries,
519 				   const unsigned int level)
520 {
521 	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
522 
523 	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
524 
525 	uintptr_t table_idx_va;
526 	unsigned long long table_idx_pa;
527 
528 	uint64_t *subtable;
529 	uint64_t desc;
530 
531 	int table_idx;
532 
533 	if (mm->base_va > table_base_va) {
534 		/* Find the first index of the table affected by the region. */
535 		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
536 
537 		table_idx = (table_idx_va - table_base_va) >>
538 			    XLAT_ADDR_SHIFT(level);
539 
540 		assert(table_idx < table_entries);
541 	} else {
542 		/* Start from the beginning of the table. */
543 		table_idx_va = table_base_va;
544 		table_idx = 0;
545 	}
546 
547 #if PLAT_XLAT_TABLES_DYNAMIC
548 	if (level > ctx->base_level)
549 		xlat_table_inc_regions_count(ctx, table_base);
550 #endif
551 
552 	while (table_idx < table_entries) {
553 
554 		desc = table_base[table_idx];
555 
556 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
557 
558 		action_t action = xlat_tables_map_region_action(mm,
559 			desc & DESC_MASK, table_idx_pa, table_idx_va, level);
560 
561 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
562 
563 			table_base[table_idx] =
564 				xlat_desc(ctx, mm->attr, table_idx_pa, level);
565 
566 		} else if (action == ACTION_CREATE_NEW_TABLE) {
567 
568 			subtable = xlat_table_get_empty(ctx);
569 			if (subtable == NULL) {
570 				/* Not enough free tables to map this region */
571 				return table_idx_va;
572 			}
573 
574 			/* Point to new subtable from this one. */
575 			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
576 
577 			/* Recurse to write into subtable */
578 			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
579 					       subtable, XLAT_TABLE_ENTRIES,
580 					       level + 1);
581 			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
582 				return end_va;
583 
584 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
585 
586 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
587 			/* Recurse to write into subtable */
588 			uintptr_t end_va =  xlat_tables_map_region(ctx, mm, table_idx_va,
589 					       subtable, XLAT_TABLE_ENTRIES,
590 					       level + 1);
591 			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
592 				return end_va;
593 
594 		} else {
595 
596 			assert(action == ACTION_NONE);
597 
598 		}
599 
600 		table_idx++;
601 		table_idx_va += XLAT_BLOCK_SIZE(level);
602 
603 		/* If reached the end of the region, exit */
604 		if (mm_end_va <= table_idx_va)
605 			break;
606 	}
607 
608 	return table_idx_va - 1;
609 }
610 
611 void print_mmap(mmap_region_t *const mmap)
612 {
613 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
614 	tf_printf("mmap:\n");
615 	mmap_region_t *mm = mmap;
616 
617 	while (mm->size) {
618 		tf_printf(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x",
619 				(void *)mm->base_va, mm->base_pa,
620 				mm->size, mm->attr);
621 		tf_printf(" granularity:0x%zx\n", mm->granularity);
622 		++mm;
623 	};
624 	tf_printf("\n");
625 #endif
626 }
627 
628 /*
629  * Function that verifies that a region can be mapped.
630  * Returns:
631  *        0: Success, the mapping is allowed.
632  *   EINVAL: Invalid values were used as arguments.
633  *   ERANGE: The memory limits were surpassed.
634  *   ENOMEM: There is not enough memory in the mmap array.
635  *    EPERM: Region overlaps another one in an invalid way.
636  */
637 static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
638 {
639 	unsigned long long base_pa = mm->base_pa;
640 	uintptr_t base_va = mm->base_va;
641 	size_t size = mm->size;
642 	size_t granularity = mm->granularity;
643 
644 	unsigned long long end_pa = base_pa + size - 1;
645 	uintptr_t end_va = base_va + size - 1;
646 
647 	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
648 			!IS_PAGE_ALIGNED(size))
649 		return -EINVAL;
650 
651 	if ((granularity != XLAT_BLOCK_SIZE(1)) &&
652 		(granularity != XLAT_BLOCK_SIZE(2)) &&
653 		(granularity != XLAT_BLOCK_SIZE(3))) {
654 		return -EINVAL;
655 	}
656 
657 	/* Check for overflows */
658 	if ((base_pa > end_pa) || (base_va > end_va))
659 		return -ERANGE;
660 
661 	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
662 		return -ERANGE;
663 
664 	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
665 		return -ERANGE;
666 
667 	/* Check that there is space in the ctx->mmap array */
668 	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
669 		return -ENOMEM;
670 
671 	/* Check for PAs and VAs overlaps with all other regions */
672 	for (mmap_region_t *mm_cursor = ctx->mmap;
673 						mm_cursor->size; ++mm_cursor) {
674 
675 		uintptr_t mm_cursor_end_va = mm_cursor->base_va
676 							+ mm_cursor->size - 1;
677 
678 		/*
679 		 * Check if one of the regions is completely inside the other
680 		 * one.
681 		 */
682 		int fully_overlapped_va =
683 			((base_va >= mm_cursor->base_va) &&
684 					(end_va <= mm_cursor_end_va)) ||
685 
686 			((mm_cursor->base_va >= base_va) &&
687 						(mm_cursor_end_va <= end_va));
688 
689 		/*
690 		 * Full VA overlaps are only allowed if both regions are
691 		 * identity mapped (zero offset) or have the same VA to PA
692 		 * offset. Also, make sure that it's not the exact same area.
693 		 * This can only be done with static regions.
694 		 */
695 		if (fully_overlapped_va) {
696 
697 #if PLAT_XLAT_TABLES_DYNAMIC
698 			if ((mm->attr & MT_DYNAMIC) ||
699 						(mm_cursor->attr & MT_DYNAMIC))
700 				return -EPERM;
701 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
702 			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
703 							(base_va - base_pa))
704 				return -EPERM;
705 
706 			if ((base_va == mm_cursor->base_va) &&
707 						(size == mm_cursor->size))
708 				return -EPERM;
709 
710 		} else {
711 			/*
712 			 * If the regions do not have fully overlapping VAs,
713 			 * then they must have fully separated VAs and PAs.
714 			 * Partial overlaps are not allowed
715 			 */
716 
717 			unsigned long long mm_cursor_end_pa =
718 				     mm_cursor->base_pa + mm_cursor->size - 1;
719 
720 			int separated_pa =
721 				(end_pa < mm_cursor->base_pa) ||
722 				(base_pa > mm_cursor_end_pa);
723 			int separated_va =
724 				(end_va < mm_cursor->base_va) ||
725 				(base_va > mm_cursor_end_va);
726 
727 			if (!(separated_va && separated_pa))
728 				return -EPERM;
729 		}
730 	}
731 
732 	return 0;
733 }
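/*
 * Illustrative outcomes of the checks above (regions are hypothetical): a
 * region whose base VA is not page-aligned fails with -EINVAL; a region that
 * ends beyond ctx->va_max_address fails with -ERANGE; two static regions
 * that share only part of their VA range (a partial overlap) fail with
 * -EPERM, whereas a full overlap is accepted if both regions are static, use
 * the same VA-to-PA offset and are not the exact same area.
 */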
734 
735 void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
736 {
737 	mmap_region_t *mm_cursor = ctx->mmap;
738 	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
739 	unsigned long long end_pa = mm->base_pa + mm->size - 1;
740 	uintptr_t end_va = mm->base_va + mm->size - 1;
741 	int ret;
742 
743 	/* Ignore empty regions */
744 	if (!mm->size)
745 		return;
746 
747 	/* Static regions must be added before initializing the xlat tables. */
748 	assert(!ctx->initialized);
749 
750 	ret = mmap_add_region_check(ctx, mm);
751 	if (ret != 0) {
752 		ERROR("mmap_add_region_check() failed. error %d\n", ret);
753 		assert(0);
754 		return;
755 	}
756 
757 	/*
758 	 * Find correct place in mmap to insert new region.
759 	 *
760 	 * 1 - Lower region VA end first.
761 	 * 2 - Smaller region size first.
762 	 *
763 	 * VA  0                                   0xFF
764 	 *
765 	 * 1st |------|
766 	 * 2nd |------------|
767 	 * 3rd                 |------|
768 	 * 4th                            |---|
769 	 * 5th                                   |---|
770 	 * 6th                            |----------|
771 	 * 7th |-------------------------------------|
772 	 *
773 	 * This is required for overlapping regions only. It simplifies adding
774 	 * regions with the loop in init_xlat_tables_ctx() because the outer
775 	 * ones won't overwrite block or page descriptors of regions added
776 	 * previously.
777 	 *
778 	 * Overlapping is only allowed for static regions.
779 	 */
780 
781 	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
782 	       && mm_cursor->size)
783 		++mm_cursor;
784 
785 	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
786 	       && (mm_cursor->size < mm->size))
787 		++mm_cursor;
788 
789 	/* Make room for new region by moving other regions up by one place */
790 	memmove(mm_cursor + 1, mm_cursor,
791 		(uintptr_t)mm_last - (uintptr_t)mm_cursor);
792 
793 	/*
794 	 * Check we haven't lost the empty sentinel from the end of the array.
795 	 * This shouldn't happen as we have checked in mmap_add_region_check
796 	 * that there is free space.
797 	 */
798 	assert(mm_last->size == 0);
799 
800 	*mm_cursor = *mm;
801 
802 	if (end_pa > ctx->max_pa)
803 		ctx->max_pa = end_pa;
804 	if (end_va > ctx->max_va)
805 		ctx->max_va = end_va;
806 }
807 
808 void mmap_add_region(unsigned long long base_pa,
809 				uintptr_t base_va,
810 				size_t size,
811 				mmap_attr_t attr)
812 {
813 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
814 	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
815 }
816 
817 
818 void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
819 {
820 	while (mm->size) {
821 		mmap_add_region_ctx(ctx, mm);
822 		mm++;
823 	}
824 }
825 
826 void mmap_add(const mmap_region_t *mm)
827 {
828 	mmap_add_ctx(&tf_xlat_ctx, mm);
829 }
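/*
 * Typical static usage (sketch with hypothetical addresses and sizes;
 * MAP_REGION and MAP_REGION_FLAT are the helpers from xlat_tables_v2.h):
 *
 *     static const mmap_region_t plat_mmap[] = {
 *             MAP_REGION_FLAT(0x00000000, 0x00100000,
 *                             MT_MEMORY | MT_RW | MT_SECURE),
 *             MAP_REGION_FLAT(0x10000000, 0x00010000,
 *                             MT_DEVICE | MT_RW | MT_SECURE),
 *             {0}
 *     };
 *
 *     mmap_add(plat_mmap);
 *
 * The array must end with a zero-sized entry: that is the sentinel that
 * mmap_add_ctx() and mmap_add_region_check() rely on.
 */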
830 
831 #if PLAT_XLAT_TABLES_DYNAMIC
832 
833 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
834 {
835 	mmap_region_t *mm_cursor = ctx->mmap;
836 	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
837 	unsigned long long end_pa = mm->base_pa + mm->size - 1;
838 	uintptr_t end_va = mm->base_va + mm->size - 1;
839 	int ret;
840 
841 	/* Nothing to do */
842 	if (!mm->size)
843 		return 0;
844 
845 	/* Now this region is a dynamic one */
846 	mm->attr |= MT_DYNAMIC;
847 
848 	ret = mmap_add_region_check(ctx, mm);
849 	if (ret != 0)
850 		return ret;
851 
852 	/*
853 	 * Find the adequate entry in the mmap array in the same way done for
854 	 * static regions in mmap_add_region_ctx().
855 	 */
856 
857 	while ((mm_cursor->base_va + mm_cursor->size - 1)
858 					< end_va && mm_cursor->size)
859 		++mm_cursor;
860 
861 	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
862 				&& (mm_cursor->size < mm->size))
863 		++mm_cursor;
864 
865 	/* Make room for new region by moving other regions up by one place */
866 	memmove(mm_cursor + 1, mm_cursor,
867 		     (uintptr_t)mm_last - (uintptr_t)mm_cursor);
868 
869 	/*
870 	 * Check we haven't lost the empty sentinel from the end of the array.
871 	 * This shouldn't happen as we have checked in mmap_add_region_check
872 	 * that there is free space.
873 	 */
874 	assert(mm_last->size == 0);
875 
876 	*mm_cursor = *mm;
877 
878 	/*
879 	 * Update the translation tables if the xlat tables are initialized. If
880 	 * not, this region will be mapped when they are initialized.
881 	 */
882 	if (ctx->initialized) {
883 		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
884 				0, ctx->base_table, ctx->base_table_entries,
885 				ctx->base_level);
886 
887 		/* Failed to map, remove mmap entry, unmap and return error. */
888 		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
889 			memmove(mm_cursor, mm_cursor + 1,
890 				(uintptr_t)mm_last - (uintptr_t)mm_cursor);
891 
892 			/*
893 			 * Check if the mapping function actually managed to map
894 			 * anything. If not, just return now.
895 			 */
896 			if (mm_cursor->base_va >= end_va)
897 				return -ENOMEM;
898 
899 			/*
900 			 * Something went wrong after mapping some table
901 			 * entries, undo every change done up to this point.
902 			 */
903 			mmap_region_t unmap_mm = {
904 					.base_pa = 0,
905 					.base_va = mm->base_va,
906 					.size = end_va - mm->base_va,
907 					.attr = 0
908 			};
909 			xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
910 							ctx->base_table_entries, ctx->base_level);
911 
912 			return -ENOMEM;
913 		}
914 
915 		/*
916 		 * Make sure that all entries are written to the memory. There
917 		 * is no need to invalidate entries when mapping dynamic regions
918 		 * because new table/block/page descriptors only replace old
919 		 * invalid descriptors, that aren't TLB cached.
920 		 */
921 		dsbishst();
922 	}
923 
924 	if (end_pa > ctx->max_pa)
925 		ctx->max_pa = end_pa;
926 	if (end_va > ctx->max_va)
927 		ctx->max_va = end_va;
928 
929 	return 0;
930 }
931 
932 int mmap_add_dynamic_region(unsigned long long base_pa,
933 			    uintptr_t base_va, size_t size, mmap_attr_t attr)
934 {
935 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
936 	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
937 }
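/*
 * Illustrative dynamic mapping sequence (hypothetical addresses), valid only
 * when PLAT_XLAT_TABLES_DYNAMIC is enabled:
 *
 *     int ret = mmap_add_dynamic_region(0x84000000ULL, 0x84000000,
 *                                       PAGE_SIZE,
 *                                       MT_MEMORY | MT_RW | MT_SECURE);
 *     if (ret != 0) {
 *             ERROR("Could not map region (%i)\n", ret);
 *     } else {
 *             ... use the mapping ...
 *             mmap_remove_dynamic_region(0x84000000, PAGE_SIZE);
 *     }
 *
 * Unlike the static API, both calls may be made after the MMU has been
 * enabled, and they return an error code instead of asserting.
 */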
938 
939 /*
940  * Removes the region with given base Virtual Address and size from the given
941  * context.
942  *
943  * Returns:
944  *        0: Success.
945  *   EINVAL: Invalid values were used as arguments (region not found).
946  *    EPERM: Tried to remove a static region.
947  */
948 int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
949 				   size_t size)
950 {
951 	mmap_region_t *mm = ctx->mmap;
952 	mmap_region_t *mm_last = mm + ctx->mmap_num;
953 	int update_max_va_needed = 0;
954 	int update_max_pa_needed = 0;
955 
956 	/* Check sanity of mmap array. */
957 	assert(mm[ctx->mmap_num].size == 0);
958 
959 	while (mm->size) {
960 		if ((mm->base_va == base_va) && (mm->size == size))
961 			break;
962 		++mm;
963 	}
964 
965 	/* Check that the region was found */
966 	if (mm->size == 0)
967 		return -EINVAL;
968 
969 	/* If the region is static it can't be removed */
970 	if (!(mm->attr & MT_DYNAMIC))
971 		return -EPERM;
972 
973 	/* Check if this region is using the top VAs or PAs. */
974 	if ((mm->base_va + mm->size - 1) == ctx->max_va)
975 		update_max_va_needed = 1;
976 	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
977 		update_max_pa_needed = 1;
978 
979 	/* Update the translation tables if needed */
980 	if (ctx->initialized) {
981 		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
982 					 ctx->base_table_entries,
983 					 ctx->base_level);
984 		xlat_arch_tlbi_va_sync();
985 	}
986 
987 	/* Remove this region by moving the rest down by one place. */
988 	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
989 
990 	/* Check if we need to update the max VAs and PAs */
991 	if (update_max_va_needed) {
992 		ctx->max_va = 0;
993 		mm = ctx->mmap;
994 		while (mm->size) {
995 			if ((mm->base_va + mm->size - 1) > ctx->max_va)
996 				ctx->max_va = mm->base_va + mm->size - 1;
997 			++mm;
998 		}
999 	}
1000 
1001 	if (update_max_pa_needed) {
1002 		ctx->max_pa = 0;
1003 		mm = ctx->mmap;
1004 		while (mm->size) {
1005 			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
1006 				ctx->max_pa = mm->base_pa + mm->size - 1;
1007 			++mm;
1008 		}
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
1015 {
1016 	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
1017 					base_va, size);
1018 }
1019 
1020 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
1021 
1022 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
1023 
1024 /* Print the attributes of the specified block descriptor. */
1025 static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
1026 {
1027 	int mem_type_index = ATTR_INDEX_GET(desc);
1028 	xlat_regime_t xlat_regime = ctx->xlat_regime;
1029 
1030 	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
1031 		tf_printf("MEM");
1032 	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
1033 		tf_printf("NC");
1034 	} else {
1035 		assert(mem_type_index == ATTR_DEVICE_INDEX);
1036 		tf_printf("DEV");
1037 	}
1038 
1039 	const char *priv_str = "(PRIV)";
1040 	const char *user_str = "(USER)";
1041 
1042 	/*
1043 	 * Showing Privileged vs Unprivileged only makes sense for EL1&0
1044 	 * mappings
1045 	 */
1046 	const char *ro_str = "-RO";
1047 	const char *rw_str = "-RW";
1048 	const char *no_access_str = "-NOACCESS";
1049 
1050 	if (xlat_regime == EL3_REGIME) {
1051 		/* For EL3, the AP[2] bit is all that matters */
1052 		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
1053 	} else {
1054 		const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
1055 		tf_printf(ap_str);
1056 		tf_printf(priv_str);
1057 		/*
1058 		 * EL0 can only have the same permissions as EL1 or no
1059 		 * permissions at all.
1060 		 */
1061 		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
1062 			  ? ap_str : no_access_str);
1063 		tf_printf(user_str);
1064 	}
1065 
1066 	const char *xn_str = "-XN";
1067 	const char *exec_str = "-EXEC";
1068 
1069 	if (xlat_regime == EL3_REGIME) {
1070 		/* For EL3, the XN bit is all that matters */
1071 		tf_printf(LOWER_ATTRS(XN) & desc ? xn_str : exec_str);
1072 	} else {
1073 		/* For EL0 and EL1, we need to know who has which rights */
1074 		tf_printf(LOWER_ATTRS(PXN) & desc ? xn_str : exec_str);
1075 		tf_printf(priv_str);
1076 
1077 		tf_printf(LOWER_ATTRS(UXN) & desc ? xn_str : exec_str);
1078 		tf_printf(user_str);
1079 	}
1080 
1081 	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
1082 }
1083 
1084 static const char * const level_spacers[] = {
1085 	"[LV0] ",
1086 	"  [LV1] ",
1087 	"    [LV2] ",
1088 	"      [LV3] "
1089 };
1090 
1091 static const char *invalid_descriptors_ommited =
1092 		"%s(%d invalid descriptors omitted)\n";
1093 
1094 /*
1095  * Recursive function that reads the translation tables passed as an argument
1096  * and prints their status.
1097  */
1098 static void xlat_tables_print_internal(xlat_ctx_t *ctx,
1099 		const uintptr_t table_base_va,
1100 		uint64_t *const table_base, const int table_entries,
1101 		const unsigned int level)
1102 {
1103 	assert(level <= XLAT_TABLE_LEVEL_MAX);
1104 
1105 	uint64_t desc;
1106 	uintptr_t table_idx_va = table_base_va;
1107 	int table_idx = 0;
1108 
1109 	size_t level_size = XLAT_BLOCK_SIZE(level);
1110 
1111 	/*
1112 	 * Keep track of how many invalid descriptors are counted in a row.
1113 	 * Whenever multiple invalid descriptors are found, only the first one
1114 	 * is printed, and a line is added to inform about how many descriptors
1115 	 * have been omitted.
1116 	 */
1117 	int invalid_row_count = 0;
1118 
1119 	while (table_idx < table_entries) {
1120 
1121 		desc = table_base[table_idx];
1122 
1123 		if ((desc & DESC_MASK) == INVALID_DESC) {
1124 
1125 			if (invalid_row_count == 0) {
1126 				tf_printf("%sVA:%p size:0x%zx\n",
1127 					  level_spacers[level],
1128 					  (void *)table_idx_va, level_size);
1129 			}
1130 			invalid_row_count++;
1131 
1132 		} else {
1133 
1134 			if (invalid_row_count > 1) {
1135 				tf_printf(invalid_descriptors_ommited,
1136 					  level_spacers[level],
1137 					  invalid_row_count - 1);
1138 			}
1139 			invalid_row_count = 0;
1140 
1141 			/*
1142 			 * Check if this is a table or a block. Tables are only
1143 			 * allowed in levels other than 3, but PAGE_DESC has the
1144 			 * same value as TABLE_DESC, so we need to check.
1145 			 */
1146 			if (((desc & DESC_MASK) == TABLE_DESC) &&
1147 					(level < XLAT_TABLE_LEVEL_MAX)) {
1148 				/*
1149 				 * Do not print any PA for a table descriptor,
1150 				 * as it doesn't directly map physical memory
1151 				 * but instead points to the next translation
1152 				 * table in the translation table walk.
1153 				 */
1154 				tf_printf("%sVA:%p size:0x%zx\n",
1155 					  level_spacers[level],
1156 					  (void *)table_idx_va, level_size);
1157 
1158 				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
1159 
1160 				xlat_tables_print_internal(ctx, table_idx_va,
1161 					(uint64_t *)addr_inner,
1162 					XLAT_TABLE_ENTRIES, level + 1);
1163 			} else {
1164 				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
1165 					  level_spacers[level],
1166 					  (void *)table_idx_va,
1167 					  (unsigned long long)(desc & TABLE_ADDR_MASK),
1168 					  level_size);
1169 				xlat_desc_print(ctx, desc);
1170 				tf_printf("\n");
1171 			}
1172 		}
1173 
1174 		table_idx++;
1175 		table_idx_va += level_size;
1176 	}
1177 
1178 	if (invalid_row_count > 1) {
1179 		tf_printf(invalid_descriptors_ommited,
1180 			  level_spacers[level], invalid_row_count - 1);
1181 	}
1182 }
1183 
1184 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1185 
1186 void xlat_tables_print(xlat_ctx_t *ctx)
1187 {
1188 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
1189 	const char *xlat_regime_str;
1190 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
1191 		xlat_regime_str = "1&0";
1192 	} else {
1193 		assert(ctx->xlat_regime == EL3_REGIME);
1194 		xlat_regime_str = "3";
1195 	}
1196 	VERBOSE("Translation tables state:\n");
1197 	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
1198 	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
1199 	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
1200 	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
1201 	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);
1202 
1203 	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
1204 	VERBOSE("  Entries @initial lookup level: %i\n",
1205 		ctx->base_table_entries);
1206 
1207 	int used_page_tables;
1208 #if PLAT_XLAT_TABLES_DYNAMIC
1209 	used_page_tables = 0;
1210 	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
1211 		if (ctx->tables_mapped_regions[i] != 0)
1212 			++used_page_tables;
1213 	}
1214 #else
1215 	used_page_tables = ctx->next_table;
1216 #endif
1217 	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
1218 		used_page_tables, ctx->tables_num,
1219 		ctx->tables_num - used_page_tables);
1220 
1221 	xlat_tables_print_internal(ctx, 0, ctx->base_table,
1222 				   ctx->base_table_entries, ctx->base_level);
1223 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1224 }
1225 
1226 void init_xlat_tables_ctx(xlat_ctx_t *ctx)
1227 {
1228 	assert(ctx != NULL);
1229 	assert(!ctx->initialized);
1230 	assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
1231 	assert(!is_mmu_enabled_ctx(ctx));
1232 
1233 	mmap_region_t *mm = ctx->mmap;
1234 
1235 	print_mmap(mm);
1236 
1237 	/* All tables must be zeroed before mapping any region. */
1238 
1239 	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
1240 		ctx->base_table[i] = INVALID_DESC;
1241 
1242 	for (unsigned int j = 0; j < ctx->tables_num; j++) {
1243 #if PLAT_XLAT_TABLES_DYNAMIC
1244 		ctx->tables_mapped_regions[j] = 0;
1245 #endif
1246 		for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
1247 			ctx->tables[j][i] = INVALID_DESC;
1248 	}
1249 
1250 	while (mm->size) {
1251 		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
1252 				ctx->base_table_entries, ctx->base_level);
1253 
1254 		if (end_va != mm->base_va + mm->size - 1) {
1255 			ERROR("Not enough memory to map region:\n"
1256 			      " VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
1257 			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
1258 			panic();
1259 		}
1260 
1261 		mm++;
1262 	}
1263 
1264 	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
1265 	assert(ctx->max_va <= ctx->va_max_address);
1266 	assert(ctx->max_pa <= ctx->pa_max_address);
1267 
1268 	ctx->initialized = 1;
1269 
1270 	xlat_tables_print(ctx);
1271 }
1272 
1273 void init_xlat_tables(void)
1274 {
1275 	init_xlat_tables_ctx(&tf_xlat_ctx);
1276 }
1277 
1278 /*
1279  * If dynamic allocation of new regions is disabled then by the time we call the
1280  * function enabling the MMU, we'll have registered all the memory regions to
1281  * map for the system's lifetime. Therefore, at this point we know the maximum
1282  * physical address that will ever be mapped.
1283  *
1284  * If dynamic allocation is enabled then we can't make any such assumption
1285  * because the maximum physical address could get pushed while adding a new
1286  * region. Therefore, in this case we have to assume that the whole address
1287  * space size might be mapped.
1288  */
1289 #if PLAT_XLAT_TABLES_DYNAMIC
1290 #define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
1291 #else
1292 #define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
1293 #endif
1294 
1295 #ifdef AARCH32
1296 
1297 void enable_mmu_secure(unsigned int flags)
1298 {
1299 	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
1300 			tf_xlat_ctx.va_max_address);
1301 }
1302 
1303 #else
1304 
1305 void enable_mmu_el1(unsigned int flags)
1306 {
1307 	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
1308 			tf_xlat_ctx.va_max_address);
1309 }
1310 
1311 void enable_mmu_el3(unsigned int flags)
1312 {
1313 	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
1314 			tf_xlat_ctx.va_max_address);
1315 }
1316 
1317 #endif /* AARCH32 */
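/*
 * Typical boot-time sequence using the default context (sketch; the mmap
 * array and the flags value are platform-specific):
 *
 *     mmap_add(plat_mmap);        // or individual mmap_add_region() calls
 *     init_xlat_tables();         // build the tables for tf_xlat_ctx
 *     enable_mmu_el3(0);          // or enable_mmu_el1()/enable_mmu_secure()
 *
 * All static regions must be registered before init_xlat_tables(), as
 * enforced by the assert(!ctx->initialized) in mmap_add_region_ctx().
 */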
1318 
1319 /*
1320  * Do a translation table walk to find the block or page descriptor that maps
1321  * virtual_addr.
1322  *
1323  * On success, return the address of the descriptor within the translation
1324  * table. Its lookup level is stored in '*out_level'.
1325  * On error, return NULL.
1326  *
1327  * xlat_table_base
1328  *   Base address for the initial lookup level.
1329  * xlat_table_base_entries
1330  *   Number of entries in the translation table for the initial lookup level.
1331  * virt_addr_space_size
1332  *   Size in bytes of the virtual address space.
1333  */
1334 static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
1335 				       void *xlat_table_base,
1336 				       int xlat_table_base_entries,
1337 				       unsigned long long virt_addr_space_size,
1338 				       int *out_level)
1339 {
1340 	unsigned int start_level;
1341 	uint64_t *table;
1342 	int entries;
1343 
1344 	VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
1345 
1346 	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
1347 	VERBOSE("Starting translation table walk from level %i\n", start_level);
1348 
1349 	table = xlat_table_base;
1350 	entries = xlat_table_base_entries;
1351 
1352 	for (unsigned int level = start_level;
1353 	     level <= XLAT_TABLE_LEVEL_MAX;
1354 	     ++level) {
1355 		int idx;
1356 		uint64_t desc;
1357 		uint64_t desc_type;
1358 
1359 		VERBOSE("Table address: %p\n", (void *)table);
1360 
1361 		idx = XLAT_TABLE_IDX(virtual_addr, level);
1362 		VERBOSE("Index into level %i table: %i\n", level, idx);
1363 		if (idx >= entries) {
1364 			VERBOSE("Invalid address\n");
1365 			return NULL;
1366 		}
1367 
1368 		desc = table[idx];
1369 		desc_type = desc & DESC_MASK;
1370 		VERBOSE("Descriptor at level %i: 0x%llx\n", level,
1371 				(unsigned long long)desc);
1372 
1373 		if (desc_type == INVALID_DESC) {
1374 			VERBOSE("Invalid entry (memory not mapped)\n");
1375 			return NULL;
1376 		}
1377 
1378 		if (level == XLAT_TABLE_LEVEL_MAX) {
1379 			/*
1380 			 * There can't be table entries at the final lookup
1381 			 * level.
1382 			 */
1383 			assert(desc_type == PAGE_DESC);
1384 			VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
1385 				(unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
1386 			*out_level = level;
1387 			return &table[idx];
1388 		}
1389 
1390 		if (desc_type == BLOCK_DESC) {
1391 			VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
1392 				(unsigned long long)XLAT_BLOCK_SIZE(level));
1393 			*out_level = level;
1394 			return &table[idx];
1395 		}
1396 
1397 		assert(desc_type == TABLE_DESC);
1398 		VERBOSE("Table descriptor, continuing xlat table walk...\n");
1399 		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
1400 		entries = XLAT_TABLE_ENTRIES;
1401 	}
1402 
1403 	/*
1404 	 * This shouldn't be reached, the translation table walk should end at
1405 	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
1406 	 */
1407 	assert(0);
1408 
1409 	return NULL;
1410 }
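/*
 * For reference (4 KiB translation granule): each level of the walk above
 * resolves 9 bits of the virtual address, so XLAT_ADDR_SHIFT() is 39, 30, 21
 * and 12 for levels 0 to 3, and XLAT_BLOCK_SIZE() is 512 GiB, 1 GiB, 2 MiB
 * and 4 KiB respectively. For example, a 4 GiB virtual address space starts
 * the walk at level 1 with four entries in the base table.
 */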
1411 
1412 
1413 static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
1414 		mmap_attr_t *attributes, uint64_t **table_entry,
1415 		unsigned long long *addr_pa, int *table_level)
1416 {
1417 	uint64_t *entry;
1418 	uint64_t desc;
1419 	int level;
1420 	unsigned long long virt_addr_space_size;
1421 
1422 	/*
1423 	 * Sanity-check arguments.
1424 	 */
1425 	assert(ctx != NULL);
1426 	assert(ctx->initialized);
1427 	assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
1428 
1429 	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
1430 	assert(virt_addr_space_size > 0);
1431 
1432 	entry = find_xlat_table_entry(base_va,
1433 				ctx->base_table,
1434 				ctx->base_table_entries,
1435 				virt_addr_space_size,
1436 				&level);
1437 	if (entry == NULL) {
1438 		WARN("Address %p is not mapped.\n", (void *)base_va);
1439 		return -EINVAL;
1440 	}
1441 
1442 	if (addr_pa != NULL) {
1443 		*addr_pa = *entry & TABLE_ADDR_MASK;
1444 	}
1445 
1446 	if (table_entry != NULL) {
1447 		*table_entry = entry;
1448 	}
1449 
1450 	if (table_level != NULL) {
1451 		*table_level = level;
1452 	}
1453 
1454 	desc = *entry;
1455 
1456 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
1457 	VERBOSE("Attributes: ");
1458 	xlat_desc_print(ctx, desc);
1459 	tf_printf("\n");
1460 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
1461 
1462 	assert(attributes != NULL);
1463 	*attributes = 0;
1464 
1465 	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
1466 
1467 	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
1468 		*attributes |= MT_MEMORY;
1469 	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
1470 		*attributes |= MT_NON_CACHEABLE;
1471 	} else {
1472 		assert(attr_index == ATTR_DEVICE_INDEX);
1473 		*attributes |= MT_DEVICE;
1474 	}
1475 
1476 	int ap2_bit = (desc >> AP2_SHIFT) & 1;
1477 
1478 	if (ap2_bit == AP2_RW)
1479 		*attributes |= MT_RW;
1480 
1481 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
1482 		int ap1_bit = (desc >> AP1_SHIFT) & 1;
1483 		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
1484 			*attributes |= MT_USER;
1485 	}
1486 
1487 	int ns_bit = (desc >> NS_SHIFT) & 1;
1488 
1489 	if (ns_bit == 1)
1490 		*attributes |= MT_NS;
1491 
1492 	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
1493 
1494 	if ((desc & xn_mask) == xn_mask) {
1495 		*attributes |= MT_EXECUTE_NEVER;
1496 	} else {
1497 		assert((desc & xn_mask) == 0);
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 
1504 int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
1505 		mmap_attr_t *attributes)
1506 {
1507 	return get_mem_attributes_internal(ctx, base_va, attributes,
1508 					   NULL, NULL, NULL);
1509 }
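/*
 * Query example (sketch, hypothetical address): read back the attributes of
 * a page mapped in the default context registered at the top of this file:
 *
 *     mmap_attr_t attr;
 *
 *     if (get_mem_attributes(&tf_xlat_ctx, 0x84000000, &attr) == 0) {
 *             if (attr & MT_EXECUTE_NEVER)
 *                     INFO("Page is execute-never\n");
 *     }
 */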
1510 
1511 
1512 int change_mem_attributes(xlat_ctx_t *ctx,
1513 			uintptr_t base_va,
1514 			size_t size,
1515 			mmap_attr_t attr)
1516 {
1517 	/* Note: This implementation isn't optimized. */
1518 
1519 	assert(ctx != NULL);
1520 	assert(ctx->initialized);
1521 
1522 	unsigned long long virt_addr_space_size =
1523 		(unsigned long long)ctx->va_max_address + 1;
1524 	assert(virt_addr_space_size > 0);
1525 
1526 	if (!IS_PAGE_ALIGNED(base_va)) {
1527 		WARN("%s: Address %p is not aligned on a page boundary.\n",
1528 		     __func__, (void *)base_va);
1529 		return -EINVAL;
1530 	}
1531 
1532 	if (size == 0) {
1533 		WARN("%s: Size is 0.\n", __func__);
1534 		return -EINVAL;
1535 	}
1536 
1537 	if ((size % PAGE_SIZE) != 0) {
1538 		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
1539 		     __func__, size);
1540 		return -EINVAL;
1541 	}
1542 
1543 	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
1544 		WARN("%s() does not allow remapping memory as both read-write and executable.\n",
1545 		     __func__);
1546 		return -EINVAL;
1547 	}
1548 
1549 	int pages_count = size / PAGE_SIZE;
1550 
1551 	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
1552 		pages_count, (void *)base_va);
1553 
1554 	uintptr_t base_va_original = base_va;
1555 
1556 	/*
1557 	 * Sanity checks.
1558 	 */
1559 	for (int i = 0; i < pages_count; ++i) {
1560 		uint64_t *entry;
1561 		uint64_t desc;
1562 		int level;
1563 
1564 		entry = find_xlat_table_entry(base_va,
1565 					      ctx->base_table,
1566 					      ctx->base_table_entries,
1567 					      virt_addr_space_size,
1568 					      &level);
1569 		if (entry == NULL) {
1570 			WARN("Address %p is not mapped.\n", (void *)base_va);
1571 			return -EINVAL;
1572 		}
1573 
1574 		desc = *entry;
1575 
1576 		/*
1577 		 * Check that all the required pages are mapped at page
1578 		 * granularity.
1579 		 */
1580 		if (((desc & DESC_MASK) != PAGE_DESC) ||
1581 			(level != XLAT_TABLE_LEVEL_MAX)) {
1582 			WARN("Address %p is not mapped at the right granularity.\n",
1583 			     (void *)base_va);
1584 			WARN("Granularity is 0x%llx, should be 0x%x.\n",
1585 			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
1586 			return -EINVAL;
1587 		}
1588 
1589 		/*
1590 		 * If the region type is device, it shouldn't be executable.
1591 		 */
1592 		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
1593 		if (attr_index == ATTR_DEVICE_INDEX) {
1594 			if ((attr & MT_EXECUTE_NEVER) == 0) {
1595 				WARN("Cannot remap device memory as executable at address %p.\n",
1596 				     (void *)base_va);
1597 				return -EINVAL;
1598 			}
1599 		}
1600 
1601 		base_va += PAGE_SIZE;
1602 	}
1603 
1604 	/* Restore original value. */
1605 	base_va = base_va_original;
1606 
1607 	VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
1608 		__func__);
1609 
1610 	for (int i = 0; i < pages_count; ++i) {
1611 
1612 		mmap_attr_t old_attr, new_attr;
1613 		uint64_t *entry;
1614 		int level;
1615 		unsigned long long addr_pa;
1616 
1617 		get_mem_attributes_internal(ctx, base_va, &old_attr,
1618 					    &entry, &addr_pa, &level);
1619 
1620 		VERBOSE("Old attributes: 0x%x\n", old_attr);
1621 
1622 		/*
1623 		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
1624 		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
1625 		 * information is ignored.
1626 		 */
1627 
1628 		/* Clean the old attributes so that they can be rebuilt. */
1629 		new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
1630 
1631 		/*
1632 		 * Update attributes, but filter out the ones this function
1633 		 * isn't allowed to change.
1634 		 */
1635 		new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
1636 
1637 		VERBOSE("New attributes: 0x%x\n", new_attr);
1638 
1639 		/*
1640 		 * The break-before-make sequence requires writing an invalid
1641 		 * descriptor and making sure that the system sees the change
1642 		 * before writing the new descriptor.
1643 		 */
1644 		*entry = INVALID_DESC;
1645 
1646 		/* Invalidate any cached copy of this mapping in the TLBs. */
1647 		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
1648 
1649 		/* Ensure completion of the invalidation. */
1650 		xlat_arch_tlbi_va_sync();
1651 
1652 		/* Write new descriptor */
1653 		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
1654 
1655 		base_va += PAGE_SIZE;
1656 	}
1657 
1658 	/* Ensure that the last descriptor written is seen by the system. */
1659 	dsbish();
1660 
1661 	return 0;
1662 }
1663
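/*
 * Usage sketch (hypothetical address; attribute names from xlat_tables_v2.h):
 * make two already-mapped data pages read-only and non-executable in the
 * default context:
 *
 *     int ret = change_mem_attributes(&tf_xlat_ctx, 0x84000000,
 *                                     2 * PAGE_SIZE,
 *                                     MT_RO | MT_EXECUTE_NEVER);
 *     if (ret != 0)
 *             ERROR("change_mem_attributes() failed (%i)\n", ret);
 *
 * The pages must already be mapped with 4 KiB (page) granularity, and
 * requests that would leave memory both writable and executable are
 * rejected, as checked above.
 */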