/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_mmap_print(const mmap_region_t *mmap)
{
	printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
		       mm->base_va, mm->base_pa, mm->size, mm->attr,
		       mm->granularity);
		++mm;
	}
	printf("\n");
}

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		printf("DEV");
	}

	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
		/* For EL3 and EL2 only check the AP[2] and XN bits. */
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
	} else {
		assert(xlat_regime == EL1_EL0_REGIME);
		/*
		 * For EL0 and EL1:
		 * - In AArch64, PXN and UXN can be set independently, but in
		 *   AArch32 there is no UXN (XN affects both privilege
		 *   levels). For consistency, we set them simultaneously in
		 *   both cases.
		 * - RO and RW permissions must be the same in EL1 and EL0. If
		 *   EL0 can access that memory region, so can EL1, with the
		 *   same permissions.
		 */
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		/* Only check one of PXN and UXN; the other one is the same. */
		printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
		/*
		 * Privileged regions can only be accessed from EL1; user
		 * regions can be accessed from EL1 and EL0.
		 */
		printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
			  ? "-USER" : "-PRIV");
	}

#if ENABLE_RME
	switch (desc & LOWER_ATTRS(EL3_S1_NSE | NS)) {
	case 0ULL:
		printf("-S");
		break;
	case LOWER_ATTRS(NS):
		printf("-NS");
		break;
	case LOWER_ATTRS(EL3_S1_NSE):
		printf("-RT");
		break;
	default: /* LOWER_ATTRS(EL3_S1_NSE | NS) */
		printf("-RL");
	}
#else
	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
#endif

#ifdef __aarch64__
	/* Check Guarded Page bit */
	if ((desc & GP) != 0ULL) {
		printf("-GP");
	}
#endif
}
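
/*
 * For reference, a block mapped as normal memory, read-only, executable and
 * secure is printed by xlat_desc_print() as "MEM-RO-EXEC-S" (with "-GP"
 * appended on AArch64 when the Guarded Page bit is set).
 */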

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
		"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
		const uint64_t *table_base, unsigned int table_entries,
		unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	unsigned int table_idx = 0U;
	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				printf(invalid_descriptors_omitted,
				       level_spacers[level],
				       invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
			    (level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1U);
			} else {
				printf("%sVA:0x%lx PA:0x%" PRIx64 " size:0x%zx ",
				       level_spacers[level], table_idx_va,
				       (uint64_t)(desc & TABLE_ADDR_MASK),
				       level_size);
				xlat_desc_print(ctx, desc);
				printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		printf(invalid_descriptors_omitted,
		       level_spacers[level], invalid_row_count - 1);
	}
}
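
/*
 * For reference, the dump produced by the function above looks roughly like
 * this (the exact addresses and attributes depend on the platform memory
 * map):
 *
 *   [LV1] VA:0x0 size:0x40000000
 *     [LV2] VA:0x4000000 size:0x200000
 *       [LV3] VA:0x4000000 PA:0x4000000 size:0x1000 MEM-RO-EXEC-S
 *       [LV3] VA:0x4001000 size:0x1000
 *       (5 invalid descriptors omitted)
 */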

void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;
	int used_page_tables;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else if (ctx->xlat_regime == EL2_REGIME) {
		xlat_regime_str = "2";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);

	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %u\n",
		ctx->base_table_entries);

#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0) {
			++used_page_tables;
		}
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}
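
/*
 * Illustrative usage, a sketch only: assuming a build with LOG_LEVEL >=
 * LOG_LEVEL_VERBOSE and the library's default context (tf_xlat_ctx is an
 * assumed name; use whichever xlat_ctx_t the image registered), the whole
 * table hierarchy can be dumped after initialization:
 *
 *     init_xlat_tables_ctx(&tf_xlat_ctx);
 *     xlat_tables_print(&tf_xlat_ctx);
 */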

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       unsigned int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       unsigned int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	unsigned int entries;

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
		uint64_t idx, desc, desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
			 * Only page descriptors are allowed at the final
			 * lookup level.
			 */
			assert(desc_type == PAGE_DESC);
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached: the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
	assert(false);

	return NULL;
}
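
/*
 * As a worked example of the walk above (assuming a 4KB granule and a 4GB
 * virtual address space, so the walk starts at level 1): for a VA mapped by
 * a 4KB page, the loop follows the level 1 and level 2 table descriptors and
 * returns a pointer to the level 3 page descriptor with *out_level == 3; for
 * a VA covered by a 2MB block, it stops at level 2 instead.
 */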


static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, unsigned int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	unsigned int level;
	unsigned long long virt_addr_space_size;

	/*
	 * Sanity-check arguments.
	 */
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
	       (ctx->xlat_regime == EL2_REGIME) ||
	       (ctx->xlat_regime == EL3_REGIME));

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
	assert(virt_addr_space_size > 0U);

	entry = find_xlat_table_entry(base_va,
				      ctx->base_table,
				      ctx->base_table_entries,
				      virt_addr_space_size,
				      &level);
	if (entry == NULL) {
		WARN("Address 0x%lx is not mapped.\n", base_va);
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
	*attributes = 0U;

	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

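	/*
	 * AP[2] encodes the access permission: 0 means read-write, 1 means
	 * read-only. In the EL1&0 regime, AP[1] additionally selects whether
	 * unprivileged (EL0) accesses are allowed.
	 */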
	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;

	if (ap2_bit == AP2_RW) {
		*attributes |= MT_RW;
	}

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;

		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED) {
			*attributes |= MT_USER;
		}
	}

	uint64_t ns_bit = (desc >> NS_SHIFT) & 1ULL;

#if ENABLE_RME
	uint64_t nse_bit = (desc >> NSE_SHIFT) & 1ULL;
	uint32_t sec_state = (uint32_t)(ns_bit | (nse_bit << 1ULL));

	/*
	 * =========================================================
	 *  NSE  NS  |  Output PA space
	 * =========================================================
	 *   0    0  |  Secure (if S-EL2 is present, else invalid)
	 *   0    1  |  Non-secure
	 *   1    0  |  Root
	 *   1    1  |  Realm
	 * =========================================================
	 */
	switch (sec_state) {
	case 0U:
		/*
		 * We expect to get a Secure mapping on an RME system only if
		 * S-EL2 is enabled. Hence panic() if we hit this case without
		 * EEL2 being enabled.
		 */
		if ((read_scr_el3() & SCR_EEL2_BIT) == 0ULL) {
			ERROR("A secure descriptor is not supported when "
			      "FEAT_RME is implemented and FEAT_SEL2 is "
			      "not enabled\n");
			panic();
		} else {
			*attributes |= MT_SECURE;
		}
		break;
	case 1U:
		*attributes |= MT_NS;
		break;
	case 2U:
		*attributes |= MT_ROOT;
		break;
	case 3U:
		*attributes |= MT_REALM;
		break;
	default:
		/* Unreachable code. */
		assert(false);
		break;
	}
#else /* !ENABLE_RME */
	if (ns_bit == 1ULL) {
		*attributes |= MT_NS;
	} else {
		*attributes |= MT_SECURE;
	}
#endif /* ENABLE_RME */

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0U);
	}

	return 0;
}


int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				uint32_t *attr)
{
	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
						NULL, NULL, NULL);
}
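
/*
 * Illustrative usage (a sketch; tf_xlat_ctx is an assumed context name, use
 * whichever xlat_ctx_t your image registered): query whether a mapped page
 * is writable.
 *
 *     uint32_t attr;
 *
 *     if (xlat_get_mem_attributes_ctx(&tf_xlat_ctx, va, &attr) == 0) {
 *         bool writable = (attr & MT_RW) != 0U;
 *     }
 */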


int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size, uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1U;
	assert(virt_addr_space_size > 0U);

	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
		     __func__, base_va);
		return -EINVAL;
	}

	if (size == 0U) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

	if ((size % PAGE_SIZE) != 0U) {
		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
		     __func__, size);
		return -EINVAL;
	}

	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
		     __func__);
		return -EINVAL;
	}

	size_t pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
		pages_count, base_va);

	uintptr_t base_va_original = base_va;

	/*
	 * First pass: sanity checks only. Verify that the whole region is
	 * mapped at page granularity before modifying anything, so that the
	 * operation either fully applies or fails with no side effects.
	 */
	for (unsigned int i = 0U; i < pages_count; ++i) {
		const uint64_t *entry;
		uint64_t desc, attr_index;
		unsigned int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address 0x%lx is not mapped.\n", base_va);
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
		    (level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address 0x%lx is not mapped at the right granularity.\n",
			     base_va);
			WARN("Granularity is 0x%lx, should be 0x%lx.\n",
			     XLAT_BLOCK_SIZE(level), PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0U) {
				WARN("Setting device memory as executable at address 0x%lx.\n",
				     base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

	/* Second pass: rewrite the descriptors. */
	for (unsigned int i = 0U; i < pages_count; ++i) {

		uint32_t old_attr = 0U, new_attr;
		uint64_t *entry = NULL;
		unsigned int level = 0U;
		unsigned long long addr_pa = 0ULL;

		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
						&entry, &addr_pa, &level);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;
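		/*
		 * When the platform does not provide hardware-managed
		 * coherency, clean the updated descriptor to the point of
		 * coherency so that the table walker observes the change.
		 */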
#if !HW_ASSISTED_COHERENCY
		dccvac((uintptr_t)entry);
#endif
		/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write the new descriptor. */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
#if !HW_ASSISTED_COHERENCY
		dccvac((uintptr_t)entry);
#endif
		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor written is seen by the system. */
	dsbish();

	return 0;
}
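
/*
 * Illustrative usage (a sketch; tf_xlat_ctx is an assumed context name):
 * after copying code into a buffer that was mapped MT_RW | MT_EXECUTE_NEVER,
 * the region can be remapped read-only and executable, one break-before-make
 * sequence per page.
 *
 *     int ret = xlat_change_mem_attributes_ctx(&tf_xlat_ctx, buf_va, buf_size,
 *                                     MT_RO | MT_EXECUTE | MT_PRIVILEGED);
 *     if (ret != 0) {
 *         ERROR("Could not remap buffer (%d)\n", ret);
 *     }
 */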