/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ull << 40) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */
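
/*
 * Worked example (informative): the long-descriptor format on AArch32
 * provides a 40-bit physical address space, so the largest valid PA is
 * (1ull << 40) - 1ull == 0xFFFFFFFFFFULL, i.e. the last byte of 1 TiB.
 */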

int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
{
	return (read_sctlr() & SCTLR_M_BIT) != 0;
}
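
/*
 * Illustrative usage sketch (informative, not part of this file): on
 * AArch32 the context argument is ignored and the check reads SCTLR.M
 * directly, so a caller can use it to decide whether a live table
 * update must be followed by TLB maintenance, e.g.:
 *
 *	if (is_mmu_enabled_ctx(ctx))
 *		xlat_arch_tlbi_va(va);
 *
 * "ctx" and "va" are placeholder names used only for this example.
 */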

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}

void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}

void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 Architecture Reference
	 * Manual (issue k), paragraph "Ordering and completion of TLB
	 * maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
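
/*
 * Illustrative usage sketch (informative, not part of this file): after
 * modifying a live translation table entry, the invalidate and the sync
 * are intended to be used as a pair, e.g.:
 *
 *	table[idx] = new_desc;		// update the descriptor
 *	xlat_arch_tlbi_va(va);		// DSB + TLBIMVAAIS for that VA
 *	xlat_arch_tlbi_va_sync();	// BPIALLIS + DSB + ISB to complete
 *
 * "table", "idx", "new_desc" and "va" are hypothetical names used only
 * for this example.
 */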

int xlat_arch_current_el(void)
{
	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
	 */
	return 3;
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the page tables
 * have already been created.
 ******************************************************************************/
void enable_mmu_arch(unsigned int flags,
		uint64_t *base_table,
		unsigned long long max_pa,
		uintptr_t max_va)
{
	u_register_t mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());

	sctlr = read_sctlr();
	assert((sctlr & SCTLR_M_BIT) == 0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
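
	/*
	 * Informative sketch of the resulting layout: MAIR0_ATTR_SET(attr,
	 * index) places the 8-bit memory attribute encoding "attr" in byte
	 * "index" of MAIR0, i.e. at bits [(index * 8) + 7 : index * 8], so
	 * each of the three attributes above occupies its own byte.
	 */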

	/*
	 * Configure the control register for stage 1 of the PL1&0 translation
	 * regime.
	 */

	/* Use the Long-descriptor translation table format. */
	ttbcr = TTBCR_EAE_BIT;

	/*
	 * Disable translation table walk for addresses that are translated
	 * using TTBR1. Therefore, only TTBR0 is used.
	 */
	ttbcr |= TTBCR_EPD1_BIT;

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size, if smaller than
	 * 32 bits.
	 */
	if (max_va != UINT32_MAX) {
		uintptr_t virtual_addr_space_size = max_va + 1;
		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
		/*
		 * __builtin_ctzll(0) is undefined but here we are guaranteed
		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
		 */
		ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
	}
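
	/*
	 * Worked example (informative): CHECK_VIRT_ADDR_SPACE_SIZE()
	 * validates that the size is a power of two, so __builtin_ctzll()
	 * returns log2 of the size. For a 1 GiB address space,
	 * virtual_addr_space_size is 1 << 30, __builtin_ctzll() yields 30,
	 * and T0SZ is set to 32 - 30 = 2, matching a 30-bit input range.
	 */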

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks using TTBR0.
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA;
	}

	/* Set TTBR0 bits as well */
	ttbr0 = (uint64_t)(uintptr_t) base_table;
#if ARM_ARCH_AT_LEAST(8, 2)
	/*
	 * Enable CnP bit so as to share page tables with all PEs.
	 * Mandatory for ARMv8.2 implementations.
	 */
	ttbr0 |= TTBR_CNP_BIT;
#endif

	/* Now program the relevant system registers */
	write_mair0(mair0);
	write_ttbcr(ttbcr);
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
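
/*
 * Illustrative call sketch (informative, not part of this file): this
 * function is normally reached through the library's own MMU-enable
 * path once the tables have been built, along the lines of:
 *
 *	enable_mmu_arch(flags, ctx->base_table, ctx->max_pa, ctx->max_va);
 *
 * where "ctx" stands for the active xlat_ctx_t; the exact caller shown
 * here is an assumption of this sketch, not a contract of this file.
 */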