/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/* Helper function that cleans the data cache only if it is enabled. */
static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
{
	if (is_dcache_enabled())
		clean_dcache_range(addr, size);
}

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * The following functions assume that they will be called using subtables only.
 * The base table can't be unmapped, so no special handling is needed for it.
 */

/*
 * Returns the index of the specified translation table within the context's
 * array of tables.
 */
static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables[i] == table)
			return i;

	/*
	 * Maybe we were asked to get the index of the base level table, which
	 * should never happen.
	 */
	assert(false);

	return -1;
}

/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables_mapped_regions[i] == 0)
			return ctx->tables[i];

	return NULL;
}

/* Increments region count for a given table. */
static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
					 const uint64_t *table)
{
	int idx = xlat_table_get_index(ctx, table);

	ctx->tables_mapped_regions[idx]++;
}

/* Decrements region count for a given table. */
static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
					 const uint64_t *table)
{
	int idx = xlat_table_get_index(ctx, table);

	ctx->tables_mapped_regions[idx]--;
}

/* Returns true if the specified translation table is empty. */
static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
{
	return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
}

#else /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * Returns a block/page table descriptor for the given level and attributes.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
		   unsigned long long addr_pa, unsigned int level)
{
	uint64_t desc;
	uint32_t mem_type;
	uint32_t shareability_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/* Determine the physical address space this region belongs to. */
	desc |= xlat_arch_get_pas(attr);

	/*
	 * Deduce other fields of the descriptor based on the MT_RW memory
	 * region attribute.
	 */
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a privileged
	 * EL. For translation regimes that have no mappings for lower exception
	 * levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if ((attr & MT_USER) != 0U) {
			/* EL0 mapping requested, so we give User access */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		assert((ctx->xlat_regime == EL2_REGIME) ||
		       (ctx->xlat_regime == EL3_REGIME));
		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	}

	/*
	 * Deduce the shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these two
	 * types of memory, it is not strictly needed to set the shareability
	 * field in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which causes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit. The actual
		 * XN bit(s) to set in the descriptor depends on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		shareability_type = MT_SHAREABILITY(attr);
		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
			if (shareability_type == MT_SHAREABILITY_NSH) {
				desc |= LOWER_ATTRS(NSH);
			} else if (shareability_type == MT_SHAREABILITY_OSH) {
				desc |= LOWER_ATTRS(OSH);
			} else {
				desc |= LOWER_ATTRS(ISH);
			}

			/* Check if Branch Target Identification is enabled */
#if ENABLE_BTI
			/*
			 * Set the GP bit for block and page code entries if
			 * the BTI mechanism is implemented.
			 */
			if (is_armv8_5_bti_present() &&
			   ((attr & (MT_TYPE_MASK | MT_RW |
				MT_EXECUTE_NEVER)) == MT_CODE)) {
				desc |= GP;
			}
#endif
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
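
/*
 * Example (illustrative sketch, hypothetical inputs; ctx is assumed to be an
 * EL1&0-regime context): a level 3 descriptor for a read-write,
 * non-executable Normal memory page at PA 0x80000000 could be built as
 *
 *	uint64_t d = xlat_desc(ctx, MT_MEMORY | MT_RW | MT_EXECUTE_NEVER,
 *			       0x80000000ULL, 3U);
 *
 * The result has PAGE_DESC and the access flag set, AP configured for
 * privileged read-write, the write-back cacheable attribute index, the
 * Inner Shareable attribute (the default when no MT_SHAREABILITY_* flag is
 * given), and the XN bit(s) returned by xlat_arch_regime_get_xn_desc().
 */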

/*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
 */
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
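
/*
 * For example (illustrative): when a region fully covers a 2 MiB-aligned
 * entry of a level 2 table, an INVALID_DESC entry yields
 * ACTION_WRITE_BLOCK_ENTRY (assuming the PA is also 2 MiB-aligned, block
 * descriptors are allowed at this level, and the region's granularity
 * permits it), while an existing TABLE_DESC yields
 * ACTION_RECURSE_INTO_TABLE. A region that only partially covers the entry
 * over an INVALID_DESC yields ACTION_CREATE_NEW_TABLE. See
 * xlat_tables_map_region_action() below for the exact decision logic.
 */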

/*
 * Function that returns the first VA of the table affected by the specified
 * mmap region.
 */
static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
				   const uintptr_t table_base_va,
				   const unsigned int level)
{
	uintptr_t table_idx_va;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
	}

	return table_idx_va;
}

/*
 * Function that returns the table index for the given VA and level arguments.
 */
static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
						const uintptr_t va,
						const unsigned int level)
{
	return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
}
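
/*
 * Worked example (illustrative, assuming a 4 KiB translation granule, where
 * XLAT_ADDR_SHIFT(2) == 21): for table_base_va == 0x0, va == 0x00600000 and
 * level == 2, the function returns (0x600000 - 0x0) >> 21 == 3, i.e. the
 * fourth entry of the level 2 table.
 */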

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * Decides which action to take when unmapping the specified region, based on
 * the given arguments.
 */
static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
		const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
		const unsigned int level, const uint64_t desc_type)
{
	action_t action;
	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	if ((mm->base_va <= table_idx_va) &&
	    (region_end_va >= table_idx_end_va)) {
		/* Region covers the whole block */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors allowed,
			 * erase it.
			 */
			assert(desc_type == PAGE_DESC);

			action = ACTION_WRITE_BLOCK_ENTRY;
		} else {
			/*
			 * Other levels can have table descriptors. If
			 * so, recurse into the table and erase descriptors
			 * inside it as needed. If there is a block
			 * descriptor, just erase it. If an invalid
			 * descriptor is found, this table isn't
			 * actually mapped, which shouldn't happen.
			 */
			if (desc_type == TABLE_DESC) {
				action = ACTION_RECURSE_INTO_TABLE;
			} else {
				assert(desc_type == BLOCK_DESC);
				action = ACTION_WRITE_BLOCK_ENTRY;
			}
		}

	} else if ((mm->base_va <= table_idx_end_va) ||
		   (region_end_va >= table_idx_va)) {
		/*
		 * Region partially covers the block.
		 *
		 * This can't happen at level 3.
		 *
		 * There must be a table descriptor here; if not, there
		 * was a problem when mapping the region.
		 */
		assert(level < 3U);
		assert(desc_type == TABLE_DESC);

		action = ACTION_RECURSE_INTO_TABLE;
	} else {
		/* The region doesn't cover the block at all */
		action = ACTION_NONE;
	}

	return action;
}

/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const unsigned int table_entries,
				     const unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1U;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = xlat_tables_unmap_region_action(mm,
				table_idx_va, table_idx_end_va, level,
				desc_type);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va,
						  ctx->xlat_regime);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * Decides which action to take when mapping the specified region, based on
 * the given arguments.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, it means that some of the preliminary checks done
		 * when adding the mmap region failed to detect that PA and VA
		 * must at least be aligned to PAGE_SIZE.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}

/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				   uintptr_t table_base_va,
				   uint64_t *const table_base,
				   unsigned int table_entries,
				   unsigned int level)
{
	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	unsigned int table_idx;

	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);

#if PLAT_XLAT_TABLES_DYNAMIC
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			(uint32_t)(desc & DESC_MASK), table_idx_pa,
			table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
					  level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
			uintptr_t end_va;

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] =
				TABLE_DESC | (uintptr_t)subtable;

			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
			uintptr_t end_va;

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
					       subtable, XLAT_TABLE_ENTRIES,
					       level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)subtable,
				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
			if (end_va !=
				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1U;
}

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
		(granularity != XLAT_BLOCK_SIZE(2U)) &&
		(granularity != XLAT_BLOCK_SIZE(3U))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if (end_va > ctx->va_max_address)
		return -ERANGE;

	if (end_pa > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
		return -ENOMEM;

	/* Check for PA and VA overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
							+ mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
						(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
							(base_va - base_pa))
				return -EPERM;

			if ((base_va == mm_cursor->base_va) &&
						(size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa)
				return -EPERM;
		}
	}

	return 0;
}
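
/*
 * Example (illustrative, hypothetical addresses, both regions static and
 * identity mapped): given an existing region covering VA/PA 0x1000-0x2FFF,
 * a request for VA 0x2000-0x3FFF partially overlaps it and is rejected with
 * -EPERM, while a strict subset such as VA/PA 0x1000-0x1FFF is accepted,
 * because both regions share the same (zero) VA to PA offset and are not
 * the exact same area.
 */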

void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(false);
		return;
	}

	/*
	 * Find the correct place in mmap to insert the new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/*
	 * Find the last entry marker in the mmap.
	 */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for the new region by moving other regions up by one place. */
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_end->size == 0U);

	*mm_cursor = *mm;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
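
/*
 * Illustrative usage sketch (hypothetical addresses and context name;
 * MAP_REGION_FLAT is the xlat_tables_v2.h helper that builds an
 * identity-mapped mmap_region_t):
 *
 *	mmap_region_t region = MAP_REGION_FLAT(0x80000000ULL, 0x200000,
 *					       MT_MEMORY | MT_RW | MT_SECURE);
 *	mmap_add_region_ctx(&my_ctx, &region);
 *
 * Static regions like this one must be added before init_xlat_tables_ctx()
 * is called on the context.
 */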

/*
 * Determine the table level closest to the initial lookup level that
 * can describe this translation. Then, align the base VA to the next block
 * at the determined level.
 */
static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	/*
	 * By OR'ing the size and base PA, the alignment will be the one
	 * corresponding to the smallest boundary of the two of them.
	 *
	 * There are three different cases. For example (for 4 KiB page size):
	 *
	 * +--------------+------------------++--------------+
	 * | PA alignment | Size multiple of || VA alignment |
	 * +--------------+------------------++--------------+
	 * |     2 MiB    |       2 MiB      ||     2 MiB    | (1)
	 * |     2 MiB    |       4 KiB      ||     4 KiB    | (2)
	 * |     4 KiB    |       2 MiB      ||     4 KiB    | (3)
	 * +--------------+------------------++--------------+
	 *
	 * - In (1), it is possible to take advantage of the alignment of the PA
	 *   and the size of the region to use a level 2 translation table
	 *   instead of a level 3 one.
	 *
	 * - In (2), the size is smaller than a block entry of level 2, so a
	 *   level 3 table is needed to describe the region, or the library
	 *   will map more memory than desired.
	 *
	 * - In (3), even though the region has the size of one level 2 block
	 *   entry, it isn't possible to describe the translation with a level 2
	 *   block entry because of the alignment of the base PA.
	 *
	 *   Only bits 47:21 of a level 2 block descriptor are used by the MMU,
	 *   so bits 20:0 of the resulting address are 0 in this case. Because
	 *   of this, the PA generated as a result of this translation is
	 *   aligned to 2 MiB. The PA that was requested to be mapped is aligned
	 *   to 4 KiB, though, which means that the resulting translation is
	 *   incorrect. The only way to prevent this is by using a finer
	 *   granularity.
	 */
	unsigned long long align_check;

	align_check = mm->base_pa | (unsigned long long)mm->size;

	/*
	 * Assume it is always aligned to level 3. There's no need to check that
	 * level because its block size is PAGE_SIZE. The checks to verify that
	 * the addresses and size are aligned to PAGE_SIZE are inside
	 * mmap_add_region.
	 */
	for (unsigned int level = ctx->base_level; level <= 2U; ++level) {

		if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
			continue;

		mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
		return;
	}
}
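
/*
 * Worked example (illustrative, 4 KiB granule): for a region with
 * base_pa == 0x80200000 (2 MiB aligned) and size == 0x400000 (a 2 MiB
 * multiple), align_check has no bits set below bit 21, so the allocated
 * base VA is rounded up to the next 2 MiB boundary and the region can be
 * mapped with level 2 block entries. If the size were 0x1000 instead, no
 * level up to 2 would match and the VA would simply keep its existing page
 * alignment.
 */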

void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mm->base_va = ctx->max_va + 1UL;

	assert(mm->size > 0U);

	mmap_alloc_va_align_ctx(ctx, mm);

	/* Detect overflows. More checks are done in mmap_add_region_check(). */
	assert(mm->base_va > ctx->max_va);

	mmap_add_region_ctx(ctx, mm);
}

void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while (mm_cursor->granularity != 0U) {
		mmap_add_region_ctx(ctx, mm_cursor);
		mm_cursor++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Nothing to do */
	if (mm->size == 0U)
		return 0;

	/* Now this region is a dynamic one */
	mm->attr |= MT_DYNAMIC;

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
	       && (mm_cursor->size != 0U)) {
		++mm_cursor;
	}

	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
		++mm_cursor;
	}

	/* Make room for the new region by moving other regions up by one place. */
	(void)memmove(mm_cursor + 1U, mm_cursor,
		     (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	*mm_cursor = *mm;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		end_va = xlat_tables_map_region(ctx, mm_cursor,
				0U, ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				   ctx->base_table_entries * sizeof(uint64_t));
#endif
		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
			(void)memmove(mm_cursor, mm_cursor + 1U,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
					.base_pa = 0U,
					.base_va = mm->base_va,
					.size = end_va - mm->base_va,
					.attr = 0U
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				ctx->base_table_entries * sizeof(uint64_t));
#endif
			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, which aren't cached in the TLB.
		 */
		dsbishst();
	}

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
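
/*
 * Illustrative sketch (hypothetical addresses and context name): mapping a
 * device page at runtime, after the translation tables have been
 * initialized. A non-zero return (e.g. -ENOMEM when no free translation
 * tables are left) must be handled by the caller.
 *
 *	mmap_region_t dyn = MAP_REGION_FLAT(0x9000000ULL, PAGE_SIZE,
 *					    MT_DEVICE | MT_RW | MT_SECURE);
 *	int rc = mmap_add_dynamic_region_ctx(&my_ctx, &dyn);
 */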

int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mm->base_va = ctx->max_va + 1UL;

	if (mm->size == 0U)
		return 0;

	mmap_alloc_va_align_ctx(ctx, mm);

	/* Detect overflows. More checks are done in mmap_add_region_check(). */
	if (mm->base_va < ctx->max_va) {
		return -ENOMEM;
	}

	return mmap_add_dynamic_region_ctx(ctx, mm);
}

/*
 * Removes the region with the given base virtual address and size from the
 * given context.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	const mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0U);

	while (mm->size != 0U) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0U)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if ((mm->attr & MT_DYNAMIC) == 0U)
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1U) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
			ctx->base_table_entries * sizeof(uint64_t));
#endif
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	(void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed == 1) {
		ctx->max_va = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_va + mm->size - 1U) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1U;
			++mm;
		}
	}

	if (update_max_pa_needed == 1) {
		ctx->max_pa = 0U;
		mm = ctx->mmap;
		while (mm->size != 0U) {
			if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1U;
			++mm;
		}
	}

	return 0;
}
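
/*
 * Illustrative sketch: the region added in the previous example would be
 * torn down again with the same base VA and size (my_ctx and dyn are the
 * hypothetical names used above).
 *
 *	int rc = mmap_remove_dynamic_region_ctx(&my_ctx, dyn.base_va,
 *						dyn.size);
 */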

void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
			    uintptr_t va_max, struct mmap_region *mmap,
			    unsigned int mmap_num, uint64_t **tables,
			    unsigned int tables_num, uint64_t *base_table,
			    int xlat_regime, int *mapped_regions)
{
	ctx->xlat_regime = xlat_regime;

	ctx->pa_max_address = pa_max;
	ctx->va_max_address = va_max;

	ctx->mmap = mmap;
	ctx->mmap_num = mmap_num;
	memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);

	ctx->tables = (void *) tables;
	ctx->tables_num = tables_num;

	uintptr_t va_space_size = va_max + 1;
	ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
	ctx->base_table = base_table;
	ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);

	ctx->tables_mapped_regions = mapped_regions;

	ctx->max_pa = 0;
	ctx->max_va = 0;
	ctx->initialized = 0;
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
	assert(ctx != NULL);
	assert(!ctx->initialized);
	assert((ctx->xlat_regime == EL3_REGIME) ||
	       (ctx->xlat_regime == EL2_REGIME) ||
	       (ctx->xlat_regime == EL1_EL0_REGIME));
	assert(!is_mmu_enabled_ctx(ctx));

	mmap_region_t *mm = ctx->mmap;

	assert(ctx->va_max_address >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));

	xlat_mmap_print(mm);

	/* All tables must be zeroed before mapping any region. */

	for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	while (mm->size != 0U) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
				   ctx->base_table_entries * sizeof(uint64_t));
#endif
		if (end_va != (mm->base_va + mm->size - 1U)) {
			ERROR("Not enough memory to map region:\n"
			      " VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x\n",
			      mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		}

		mm++;
	}

	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
	assert(ctx->max_va <= ctx->va_max_address);
	assert(ctx->max_pa <= ctx->pa_max_address);

	ctx->initialized = true;

	xlat_tables_print(ctx);
}
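
/*
 * Typical usage sketch (illustrative; the region list and the exact MMU
 * enable call are platform-specific, and plat_mmap is a hypothetical name).
 * A platform adds its static regions, initializes the tables and then
 * enables the MMU, using the wrappers that operate on the library's default
 * context:
 *
 *	mmap_add(plat_mmap);	// array of MAP_REGION* entries
 *	init_xlat_tables();	// wraps init_xlat_tables_ctx()
 *	enable_mmu_el3(0U);	// or enable_mmu_el1/enable_mmu_el2
 */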