/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <lib/xlat_tables/xlat_tables.h>

#include "../xlat_tables_private.h"

#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

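/*
 * Derive the base translation level and the number of entries in the
 * base-level table from the platform's virtual address space size.
 */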
#define XLAT_TABLE_LEVEL_BASE	\
       GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
       GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

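/*
 * The long-descriptor translation table format requires the base table to
 * be aligned to its own size, hence the matching __aligned attribute.
 */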
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
	/*
	 * The long-descriptor format supports a 40-bit physical address
	 * space; return the highest addressable physical address.
	 */
	return (1ULL << 40) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

unsigned int xlat_arch_current_el(void)
{
	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
	 */
	return 3U;
}

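/*
 * The execute-never descriptor attribute is the same for every exception
 * level this library runs at, hence the unused 'el' argument.
 */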
uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
{
	return UPPER_ATTRS(XN);
}

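/*
 * Populate the base translation table from the platform's mmap regions and
 * sanity-check the resulting virtual and physical address ranges.
 */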
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	assert(PLAT_VIRT_ADDR_SPACE_SIZE >= MIN_VIRT_ADDR_SPACE_SIZE);
	assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));

	print_mmap();
	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
						&max_va, &max_pa);

	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
}

void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits: select the long-descriptor format, configure the
	 * TTBR0 walk attributes and disable walks via TTBR1.
	 */
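	/*
	 * T0SZ = 32 - log2(VA space size); __builtin_ctzll() yields the
	 * log2 because the size is asserted to be a power of two.
	 */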
	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);

	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
	}
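	/* EPD1 disables translation table walks via TTBR1. */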
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Point TTBR0 at the base translation table */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU.
	 */
	dsbish();
	isb();

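	/* Enable the MMU; WXN makes writable memory execute-never. */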
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}

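/* The "direct" variant simply delegates to enable_mmu_svc_mon(). */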
void enable_mmu_direct_svc_mon(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}