/*
 * Copyright (c) 2009-2022 Huawei Technologies Co., Ltd. All rights reserved.
 *
 * UniProton is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 * Create: 2009-12-22
 * Description: UniProton raspi4 demo
 */
#include "prt_buildef.h"
#include "prt_typedef.h"
#include "prt_module.h"
#include "prt_errno.h"
#include "mmu.h"
#include "cache_asm.h"
#include "prt_sys.h"
#include "prt_task.h"
#include "cpu_config.h"

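/*
 * Bounds of the memory reserved for translation tables. These symbols are
 * presumably defined in the linker script; the code only uses their addresses.
 */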
extern U64 g_mmu_page_begin;
extern U64 g_mmu_page_end;

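/*
 * Identity (virt == phys) mappings for the regions this demo uses: the
 * OpenAMP shared-memory window, the image itself, the GIC, and the UART.
 * Device regions get nGnRnE attributes; the image is cacheable/shareable.
 */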
static mmu_mmap_region_s g_mem_map_info[] = {
    {
        .virt      = MMU_OPENAMP_ADDR,
        .phys      = MMU_OPENAMP_ADDR,
        .size      = 0x30000,
        .max_level = 0x2,
        .attrs     = MMU_ATTR_DEVICE_NGNRNE | MMU_ACCESS_RWX,
    }, {
        .virt      = MMU_IMAGE_ADDR,
        .phys      = MMU_IMAGE_ADDR,
        .size      = 0x1000000,
        .max_level = 0x2,
        .attrs     = MMU_ATTR_CACHE_SHARE | MMU_ACCESS_RWX,
    }, {
        .virt      = MMU_GIC_ADDR,
        .phys      = MMU_GIC_ADDR,
        .size      = 0x1000000,
        .max_level = 0x2,
        .attrs     = MMU_ATTR_DEVICE_NGNRNE | MMU_ACCESS_RWX,
    }, {
        .virt      = MMU_UART_ADDR,
        .phys      = MMU_UART_ADDR,
        .size      = 0x2000,
        .max_level = 0x2,
        .attrs     = MMU_ATTR_DEVICE_NGNRNE | MMU_ACCESS_RWX,
    }
};

static mmu_ctrl_s g_mmu_ctrl = { 0 };

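/*
 * Derive a TCR_EL1 value from the memory map: pick the smallest supported
 * physical-address-size/VA-width pair that covers the highest mapped
 * address, then add the granule, shareability, and cacheability fields.
 * The chosen IPS and VA width are returned through pips/pva_bits if non-NULL.
 */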
static U64 mmu_get_tcr(U32 *pips, U32 *pva_bits)
{
    U64 max_addr = 0;
    U64 ips, va_bits;
    U64 tcr;
    U32 i;
    U32 mmu_table_num = sizeof(g_mem_map_info) / sizeof(mmu_mmap_region_s);

    for (i = 0; i < mmu_table_num; ++i) {
        max_addr = MAX(max_addr, g_mem_map_info[i].virt + g_mem_map_info[i].size);
    }

    if (max_addr > (1ULL << MMU_BITS_44)) {
        ips = MMU_PHY_ADDR_LEVEL_5;
        va_bits = MMU_BITS_48;
    } else if (max_addr > (1ULL << MMU_BITS_42)) {
        ips = MMU_PHY_ADDR_LEVEL_4;
        va_bits = MMU_BITS_44;
    } else if (max_addr > (1ULL << MMU_BITS_40)) {
        ips = MMU_PHY_ADDR_LEVEL_3;
        va_bits = MMU_BITS_42;
    } else if (max_addr > (1ULL << MMU_BITS_36)) {
        ips = MMU_PHY_ADDR_LEVEL_2;
        va_bits = MMU_BITS_40;
    } else if (max_addr > (1ULL << MMU_BITS_32)) {
        ips = MMU_PHY_ADDR_LEVEL_1;
        va_bits = MMU_BITS_36;
    } else {
        ips = MMU_PHY_ADDR_LEVEL_0;
        va_bits = MMU_BITS_32;
    }

    tcr = TCR_EL1_RSVD | TCR_IPS(ips);

    if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
        tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
    } else {
        tcr |= TCR_TG0_64K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
    }

    tcr |= TCR_T0SZ(va_bits);

    if (pips != NULL) {
        *pips = ips;
    }

    if (pva_bits != NULL) {
        *pva_bits = va_bits;
    }

    return tcr;
}

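/* Extract the descriptor type bits (fault/table/block/page) from a PTE. */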
static U32 mmu_get_pte_type(U64 const *pte)
{
    return (U32)(*pte & PTE_TYPE_MASK);
}

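/*
 * Address shift for a lookup level: a 4K granule resolves 9 index bits per
 * level above a 12-bit page offset; a 64K granule resolves 13 bits above a
 * 16-bit offset.
 */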
static U32 mmu_level2shift(U32 level)
{
    if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
        return (U32)(MMU_BITS_12 + MMU_BITS_9 * (MMU_LEVEL_3 - level));
    } else {
        return (U32)(MMU_BITS_16 + MMU_BITS_13 * (MMU_LEVEL_3 - level));
    }
}

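/*
 * Walk the translation tables from the start level and return the PTE for
 * 'addr' at 'level', or NULL if the walk reaches a non-table entry first.
 */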
static U64 *mmu_find_pte(U64 addr, U32 level)
{
    U64 *pte = NULL;
    U64 idx;
    U32 i;

    if (level < g_mmu_ctrl.start_level) {
        return NULL;
    }

    pte = (U64 *)g_mmu_ctrl.tlb_addr;

    for (i = g_mmu_ctrl.start_level; i < MMU_LEVEL_MAX; ++i) {
        if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
            idx = (addr >> mmu_level2shift(i)) & 0x1FF;
        } else {
            idx = (addr >> mmu_level2shift(i)) & 0x1FFF;
        }

        pte += idx;

        if (i == level) {
            return pte;
        }

        if (mmu_get_pte_type(pte) != PTE_TYPE_TABLE) {
            return NULL;
        }

        if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
            pte = (U64 *)(*pte & PTE_TABLE_ADDR_MARK_4K);
        } else {
            pte = (U64 *)(*pte & PTE_TABLE_ADDR_MARK_64K);
        }
    }

    return NULL;
}

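/*
 * Carve a zeroed next-level table out of the page-table region; returns
 * NULL once the region is exhausted.
 */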
static U64 *mmu_create_table(void)
{
    U32 pt_len;
    U64 *new_table = (U64 *)g_mmu_ctrl.tlb_fillptr;

    if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
        pt_len = MAX_PTE_ENTRIES_4K * sizeof(U64);
    } else {
        pt_len = MAX_PTE_ENTRIES_64K * sizeof(U64);
    }

    g_mmu_ctrl.tlb_fillptr += pt_len;

    if (g_mmu_ctrl.tlb_fillptr - g_mmu_ctrl.tlb_addr > g_mmu_ctrl.tlb_size) {
        return NULL;
    }

    (void)memset_s((void *)new_table, MAX_PTE_ENTRIES_64K * sizeof(U64), 0, pt_len);

    return new_table;
}

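/* Turn a PTE into a table descriptor pointing at the next-level table. */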
static void mmu_set_pte_table(U64 *pte, U64 *table)
{
    *pte = PTE_TYPE_TABLE | (U64)table;
}

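/*
 * Fill one PTE for a mapping: above the region's max_level, allocate an
 * intermediate table if the entry is still a fault; at max_level, write a
 * page entry (level 3) or a block entry (higher levels).
 */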
static S32 mmu_add_map_pte_process(mmu_mmap_region_s const *map, U64 *pte, U64 phys, U32 level)
{
    U64 *new_table = NULL;

    if (level < map->max_level) {
        if (mmu_get_pte_type(pte) == PTE_TYPE_FAULT) {
            new_table = mmu_create_table();
            if (new_table == NULL) {
                return -1;
            }
            mmu_set_pte_table(pte, new_table);
        }
    } else if (level == MMU_LEVEL_3) {
        *pte = phys | map->attrs | PTE_TYPE_PAGE;
    } else {
        *pte = phys | map->attrs | PTE_TYPE_BLOCK;
    }

    return 0;
}

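/*
 * Map one region: for each chunk, install the required entries at every
 * level from start_level to max_level, then advance virt/phys by the block
 * size of the deepest level reached.
 */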
static S32 mmu_add_map(mmu_mmap_region_s const *map)
{
    U64 virt = map->virt;
    U64 phys = map->phys;
    U64 max_level = map->max_level;
    U64 start_level = g_mmu_ctrl.start_level;
    U64 block_size = 0;
    U64 map_size = 0;
    U32 level;
    U64 *pte = NULL;
    S32 ret;

    if (map->max_level <= start_level) {
        return -2;
    }

    while (map_size < map->size) {
        for (level = start_level; level <= max_level; ++level) {
            pte = mmu_find_pte(virt, level);
            if (pte == NULL) {
                return -3;
            }

            ret = mmu_add_map_pte_process(map, pte, phys, level);
            if (ret) {
                return ret;
            }

            if (level != start_level) {
                block_size = 1ULL << mmu_level2shift(level);
            }
        }

        virt += block_size;
        phys += block_size;
        map_size += block_size;
    }

    return 0;
}

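/*
 * Program the translation registers: TTBR0_EL1 and TTBR1_EL1 both point at
 * the same table, with a DSB before and an ISB after the register writes.
 */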
static inline void mmu_set_ttbr_tcr_mair(U64 table, U64 tcr, U64 attr)
{
    OS_EMBED_ASM("dsb sy");

    OS_EMBED_ASM("msr ttbr0_el1, %0" : : "r" (table) : "memory");
    OS_EMBED_ASM("msr ttbr1_el1, %0" : : "r" (table) : "memory");
    OS_EMBED_ASM("msr tcr_el1, %0" : : "r" (tcr) : "memory");
    OS_EMBED_ASM("msr mair_el1, %0" : : "r" (attr) : "memory");

    OS_EMBED_ASM("isb");
}

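/*
 * Build page tables for every region in 'mem_map' inside the buffer at
 * 'tlb_addr', then install them. The start level is chosen from the VA
 * width; a 64K granule with more than 36 VA bits is rejected (returns 3).
 */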
static U32 mmu_setup_pgtables(mmu_mmap_region_s *mem_map, U32 mem_region_num, U64 tlb_addr, U64 tlb_len, U32 granule)
{
    U32 i;
    U32 ret;
    U64 tcr;
    U64 *new_table = NULL;

    g_mmu_ctrl.tlb_addr = tlb_addr;
    g_mmu_ctrl.tlb_size = tlb_len;
    g_mmu_ctrl.tlb_fillptr = tlb_addr;
    g_mmu_ctrl.granule = granule;
    g_mmu_ctrl.start_level = 0;

    tcr = mmu_get_tcr(NULL, &g_mmu_ctrl.va_bits);

    if (g_mmu_ctrl.granule == MMU_GRANULE_4K) {
        if (g_mmu_ctrl.va_bits < MMU_BITS_39) {
            g_mmu_ctrl.start_level = MMU_LEVEL_1;
        } else {
            g_mmu_ctrl.start_level = MMU_LEVEL_0;
        }
    } else {
        if (g_mmu_ctrl.va_bits <= MMU_BITS_36) {
            g_mmu_ctrl.start_level = MMU_LEVEL_2;
        } else {
            g_mmu_ctrl.start_level = MMU_LEVEL_1;
            return 3;
        }
    }

    new_table = mmu_create_table();
    if (new_table == NULL) {
        return 1;
    }

    for (i = 0; i < mem_region_num; ++i) {
        ret = mmu_add_map(&mem_map[i]);
        if (ret) {
            return ret;
        }
    }

    mmu_set_ttbr_tcr_mair(g_mmu_ctrl.tlb_addr, tcr, MEMORY_ATTRIBUTES);

    return 0;
}

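/*
 * Build 4K-granule page tables in the region bracketed by g_mmu_page_begin
 * and g_mmu_page_end.
 */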
static S32 mmu_setup(void)
{
    S32 ret;
    U64 page_addr;
    U64 page_len;

    page_addr = (U64)&g_mmu_page_begin;
    page_len = (U64)&g_mmu_page_end - (U64)&g_mmu_page_begin;

    ret = mmu_setup_pgtables(g_mem_map_info, (sizeof(g_mem_map_info) / sizeof(mmu_mmap_region_s)),
                             page_addr, page_len, MMU_GRANULE_4K);
    if (ret) {
        return ret;
    }

    return 0;
}

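/*
 * Entry point: set up the page tables, invalidate the caches and TLB, then
 * enable the MMU and both caches via SCTLR_EL1 (CR_M, CR_C, CR_I).
 */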
S32 mmu_init(void)
{
    S32 ret;

    ret = mmu_setup();
    if (ret) {
        return ret;
    }

    os_asm_invalidate_dcache_all();
    os_asm_invalidate_icache_all();
    os_asm_invalidate_tlb_all();

    set_sctlr(get_sctlr() | CR_C | CR_M | CR_I);

    return 0;
}