1 /*
2 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <platform_def.h>
11 #include <utils.h>
12 #include <xlat_tables_arch.h>
13 #include <xlat_tables.h>
14 #include "../xlat_tables_private.h"
15
/*
 * Initial (base) lookup level of the translation walk, derived from the
 * size of the platform's virtual address space.
 */
#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of entries required in the base-level translation table. */
#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Base translation table fed to TTBR0. It is aligned to its own size
 * (number of entries * descriptor size), as required for the base table
 * in the long-descriptor translation format.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
	__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
24
25 #if ENABLE_ASSERTIONS
/*
 * Return the highest physical address supported by the translation regime.
 * The long-descriptor translation table format gives a 40-bit physical
 * address space on AArch32.
 */
static unsigned long long get_max_supported_pa(void)
{
	const unsigned int pa_width_bits = 40U;

	return (1ULL << pa_width_bits) - 1ULL;
}
31 #endif /* ENABLE_ASSERTIONS */
32
/*
 * Return the exception level this translation library executes at.
 * When EL3 is in AArch32 mode, every secure PL1 mode (Monitor, System,
 * SVC, Abort, UND, IRQ and FIQ) executes at EL3, so the answer is
 * unconditionally EL3.
 */
int xlat_arch_current_el(void)
{
	const int current_el = 3;

	return current_el;
}
41
xlat_arch_get_xn_desc(int el __unused)42 uint64_t xlat_arch_get_xn_desc(int el __unused)
43 {
44 return UPPER_ATTRS(XN);
45 }
46
init_xlat_tables(void)47 void init_xlat_tables(void)
48 {
49 unsigned long long max_pa;
50 uintptr_t max_va;
51 print_mmap();
52 init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
53 &max_va, &max_pa);
54
55 assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
56 assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
57 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
58 }
59
60 /*******************************************************************************
61 * Function for enabling the MMU in Secure PL1, assuming that the
62 * page-tables have already been created.
63 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	/* Must be called from Secure state, with the MMU still disabled. */
	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 * The low field is T0SZ, computed as 32 - log2(VA space size);
	 * __builtin_ctzll equals log2 here only because
	 * PLAT_VIRT_ADDR_SPACE_SIZE is expected to be a power of two —
	 * NOTE(review): confirm the platform guarantees this.
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	}
	/* Disable translation table walks through TTBR1. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well: point it at the base translation table. */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	/*
	 * Enable the MMU and write-implies-XN; honour the caller's request
	 * to run with the data cache disabled.
	 */
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
129