/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_arch.h>
#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

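/*
 * Statically allocated base-level translation table. It is aligned to its own
 * size, as required for the table address programmed into TTBR0_ELx.
 */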
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

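/*
 * Physical address size bits to program into TCR_ELx.{I}PS, computed from the
 * highest mapped physical address in init_xlat_tables() and consumed by the
 * enable_mmu_elx() functions defined below.
 */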
static unsigned long long tcr_ps_bits;

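/*
 * Return the TCR physical address size encoding that covers the given maximum
 * physical address.
 */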
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

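/*
 * Return the largest physical address the hardware supports, as reported by
 * ID_AA64MMFR0_EL1.PARange.
 */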
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

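/* Return the current exception level, as read from CurrentEL. */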
int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

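/*
 * Return the execute-never descriptor attribute for the given exception
 * level: XN for the EL3 translation regime, PXN for EL1.
 */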
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

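/*
 * Build the translation tables from the declared mmap regions and record the
 * physical address size bits needed later when the MMU is enabled.
 */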
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the page tables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
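
/*
 * Illustrative call sequence for a BL image using this library (a sketch
 * only; the actual regions, attributes and flags are platform-specific):
 *
 *	mmap_add_region(base_pa, base_va, size, attr);	// declare regions
 *	init_xlat_tables();				// build the tables
 *	enable_mmu_el3(0);				// or enable_mmu_el1(flags)
 */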