/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#ifdef CHCORE
#include <common/util.h>
#include <mm/kmalloc.h>
#endif

#include <common/vars.h>
#include <common/macro.h>
#include <common/types.h>
#include <common/errno.h>
#include <lib/printk.h>
#include <mm/vmspace.h>
#include <mm/mm.h>
#include <arch/mmu.h>
#include <mm/common_pte.h>

#include <arch/mm/page_table.h>

/* page_table.c: use a simple implementation for now. */

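/* Install the physical address of a root page table page into TTBR0_EL1,
 * the translation table base register for the user half of the address
 * space. */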
void set_page_table(paddr_t pgtbl)
{
        set_ttbr0_el1(pgtbl);
}

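/* Convert VMR permission bits into the AArch64 AP (access permission)
 * field of a PTE. */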
static int __vmr_prot_to_ap(vmr_prop_t prot)
{
        if ((prot & VMR_READ) && !(prot & VMR_WRITE)) {
                return AARCH64_MMU_ATTR_PAGE_AP_HIGH_RO_EL0_RO;
        } else if (prot & VMR_WRITE) {
                return AARCH64_MMU_ATTR_PAGE_AP_HIGH_RW_EL0_RW;
        }
        return 0;
}

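/* The inverse of __vmr_prot_to_ap: recover VMR permission bits from an
 * AP field value. */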
static int __ap_to_vmr_prot(int ap)
{
        if (ap == AARCH64_MMU_ATTR_PAGE_AP_HIGH_RO_EL0_RO) {
                return VMR_READ;
        } else if (ap == AARCH64_MMU_ATTR_PAGE_AP_HIGH_RW_EL0_RW) {
                return VMR_READ | VMR_WRITE;
        }
        return 0;
}

#define USER_PTE 0
#define KERNEL_PTE 1

/*
 * Set the flag bits of a PTE; the 3rd argument selects the kind of PTE
 * (USER_PTE or KERNEL_PTE).
 */
static int set_pte_flags(pte_t *entry, vmr_prop_t flags, int kind)
{
        BUG_ON(kind != USER_PTE && kind != KERNEL_PTE);

        /*
         * Current access permission (AP) setting:
         * Mapped pages are always readable (XOM is not considered).
         * EL1 can directly access EL0 (no restriction like SMAP,
         * as ChCore is a microkernel).
         */
        entry->l3_page.AP = __vmr_prot_to_ap(flags);

        if (kind == KERNEL_PTE) {
                /* Kernel PTE */
                if (!(flags & VMR_EXEC))
                        entry->l3_page.PXN = AARCH64_MMU_ATTR_PAGE_PXN;
                entry->l3_page.UXN = AARCH64_MMU_ATTR_PAGE_UXN;
        } else {
                /* User PTE */
                if (!(flags & VMR_EXEC))
                        entry->l3_page.UXN = AARCH64_MMU_ATTR_PAGE_UXN;
                /* EL1 cannot directly execute an EL0-accessible region. */
                entry->l3_page.PXN = AARCH64_MMU_ATTR_PAGE_PXN;
        }

        /* Set AF (access flag) in advance. */
        entry->l3_page.AF = AARCH64_MMU_ATTR_PAGE_AF_ACCESSED;
        /* Mark the mapping as not global. */
        entry->l3_page.nG = 1;
        /* Mark the mapping as inner shareable. */
        entry->l3_page.SH = INNER_SHAREABLE;
        /* Set the memory type. */
        if (flags & VMR_DEVICE) {
                entry->l3_page.attr_index = DEVICE_MEMORY;
                entry->l3_page.SH = 0;
        } else if (flags & VMR_NOCACHE) {
                entry->l3_page.attr_index = NORMAL_MEMORY_NOCACHE;
        } else {
                entry->l3_page.attr_index = NORMAL_MEMORY;
        }

#ifdef CHCORE_OH_TEE
        if (flags & VMR_TZ_NS) {
                entry->l3_page.NS = AARCH64_MMU_ATTR_PAGE_NS_NON_SECURE;
        }
#endif /* CHCORE_OH_TEE */

        return 0;
}

#define GET_PADDR_IN_PTE(entry) \
        (((u64)(entry)->table.next_table_addr) << PAGE_SHIFT)
#define GET_NEXT_PTP(entry) phys_to_virt(GET_PADDR_IN_PTE(entry))

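/*
 * Return values of get_next_ptp(): NORMAL_PTP means the entry refers to
 * a next-level page table, BLOCK_PTP means it maps a huge-page block
 * directly.
 */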
#define NORMAL_PTP (0)
#define BLOCK_PTP (1)

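/*
 * A minimal sketch of the address split assumed here (AArch64, 4 KiB
 * granule, 48-bit VA): bits [47:39] index L0, [38:30] index L1,
 * [29:21] index L2, [20:12] index L3, and [11:0] are the page offset.
 * This is what the GET_Lx_INDEX()/GET_VA_OFFSET_Lx() macros extract.
 */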
/*
 * Find the next-level page table page for "va".
 *
 * cur_ptp: current page table page
 * level: level of cur_ptp
 *
 * next_ptp: returns the next-level page table page
 * pte: returns the PTE (in "cur_ptp") that points to "next_ptp"
 *
 * alloc: if true, allocate a new page table page when one is missing
 * rss: if non-NULL, accumulates the memory newly allocated for page
 *      table pages
 */
static int get_next_ptp(ptp_t *cur_ptp, u32 level, vaddr_t va, ptp_t **next_ptp,
                        pte_t **pte, bool alloc, long *rss)
{
        u32 index = 0;
        pte_t *entry;

        if (cur_ptp == NULL)
                return -ENOMAPPING;

        switch (level) {
        case L0:
                index = GET_L0_INDEX(va);
                break;
        case L1:
                index = GET_L1_INDEX(va);
                break;
        case L2:
                index = GET_L2_INDEX(va);
                break;
        case L3:
                index = GET_L3_INDEX(va);
                break;
        default:
                BUG_ON(1);
        }

        entry = &(cur_ptp->ent[index]);
        if (IS_PTE_INVALID(entry->pte)) {
                if (alloc == false) {
                        return -ENOMAPPING;
                } else {
                        /* Allocate a new page table page. */
                        ptp_t *new_ptp;
                        paddr_t new_ptp_paddr;
                        pte_t new_pte_val;

                        /* Allocate a single physical page as the new page
                         * table page. */
                        new_ptp = get_pages(0);
                        BUG_ON(new_ptp == NULL);
                        memset((void *)new_ptp, 0, PAGE_SIZE);
                        if (rss) {
                                *rss += PAGE_SIZE;
                        }
                        new_ptp_paddr = virt_to_phys((vaddr_t)new_ptp);

                        new_pte_val.pte = 0;
                        new_pte_val.table.is_valid = 1;
                        new_pte_val.table.is_table = 1;
                        new_pte_val.table.next_table_addr =
                                new_ptp_paddr >> PAGE_SHIFT;

                        /* Same effect as: cur_ptp->ent[index] = new_pte_val; */
                        entry->pte = new_pte_val.pte;
                }
        }

        *next_ptp = (ptp_t *)GET_NEXT_PTP(entry);
        *pte = entry;
        if (IS_PTE_TABLE(entry->pte))
                return NORMAL_PTP;
        else
                return BLOCK_PTP;
}

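/*
 * Debugging helper: like query_in_pgtbl(), but prints the PTE found at
 * each level of the walk. Note that it does not handle huge-page
 * (block) mappings.
 */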
int debug_query_in_pgtbl(void *pgtbl, vaddr_t va, paddr_t *pa, pte_t **entry)
{
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        ptp_t *phys_page;
        pte_t *pte;
        int ret;

        /* L0 page table */
        l0_ptp = (ptp_t *)pgtbl;
        ret = get_next_ptp(l0_ptp, L0, va, &l1_ptp, &pte, false, NULL);
        if (ret < 0) {
                printk("[debug_query_in_pgtbl] L0 no mapping.\n");
                return ret;
        }
        printk("L0 pte is 0x%lx\n", pte->pte);

        /* L1 page table */
        ret = get_next_ptp(l1_ptp, L1, va, &l2_ptp, &pte, false, NULL);
        if (ret < 0) {
                printk("[debug_query_in_pgtbl] L1 no mapping.\n");
                return ret;
        }
        printk("L1 pte is 0x%lx\n", pte->pte);

        /* L2 page table */
        ret = get_next_ptp(l2_ptp, L2, va, &l3_ptp, &pte, false, NULL);
        if (ret < 0) {
                printk("[debug_query_in_pgtbl] L2 no mapping.\n");
                return ret;
        }
        printk("L2 pte is 0x%lx\n", pte->pte);

        /* L3 page table */
        ret = get_next_ptp(l3_ptp, L3, va, &phys_page, &pte, false, NULL);
        if (ret < 0) {
                printk("[debug_query_in_pgtbl] L3 no mapping.\n");
                return ret;
        }
        printk("L3 pte is 0x%lx\n", pte->pte);

        *pa = virt_to_phys((vaddr_t)phys_page) + GET_VA_OFFSET_L3(va);
        *entry = pte;
        return 0;
}

#ifdef CHCORE
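/*
 * Free every page table page reachable from the root pgtbl by walking
 * all four levels. Note that this frees only the page table pages
 * themselves, not the physical pages they map.
 */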
void free_page_table(void *pgtbl)
{
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        pte_t *l0_pte, *l1_pte, *l2_pte;
        int i, j, k;

        if (pgtbl == NULL) {
                kwarn("%s: input arg is NULL.\n", __func__);
                return;
        }

        /* L0 page table */
        l0_ptp = (ptp_t *)pgtbl;

        /* Iterate over each entry in the L0 page table. */
        for (i = 0; i < PTP_ENTRIES; ++i) {
                l0_pte = &l0_ptp->ent[i];
                if (IS_PTE_INVALID(l0_pte->pte))
                        continue;
                l1_ptp = (ptp_t *)GET_NEXT_PTP(l0_pte);

                /* Iterate over each entry in the L1 page table. */
                for (j = 0; j < PTP_ENTRIES; ++j) {
                        l1_pte = &l1_ptp->ent[j];
                        if (IS_PTE_INVALID(l1_pte->pte))
                                continue;
                        l2_ptp = (ptp_t *)GET_NEXT_PTP(l1_pte);

                        /* Iterate over each entry in the L2 page table. */
                        for (k = 0; k < PTP_ENTRIES; ++k) {
                                l2_pte = &l2_ptp->ent[k];
                                if (IS_PTE_INVALID(l2_pte->pte))
                                        continue;
                                l3_ptp = (ptp_t *)GET_NEXT_PTP(l2_pte);
                                /* Free the L3 page table page. */
                                kfree(l3_ptp);
                        }

                        /* Free the L2 page table page. */
                        kfree(l2_ptp);
                }

                /* Free the L1 page table page. */
                kfree(l1_ptp);
        }

        kfree(l0_ptp);
}
#endif

/*
 * Translate a va to its pa; also return the PTE so that the caller can
 * inspect the mapping's flags.
 */
int query_in_pgtbl(void *pgtbl, vaddr_t va, paddr_t *pa, pte_t **entry)
{
        /* On aarch64, L0 is the highest level page table. */
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        ptp_t *phys_page;
        pte_t *pte;
        int ret;

        /* L0 page table */
        l0_ptp = (ptp_t *)pgtbl;
        ret = get_next_ptp(l0_ptp, L0, va, &l1_ptp, &pte, false, NULL);
        if (ret < 0)
                return ret;

        /* L1 page table */
        ret = get_next_ptp(l1_ptp, L1, va, &l2_ptp, &pte, false, NULL);
        if (ret < 0)
                return ret;
        else if (ret == BLOCK_PTP) {
                /* 1 GiB block mapping: "l2_ptp" is actually the mapped
                 * physical region. */
                *pa = virt_to_phys((vaddr_t)l2_ptp) + GET_VA_OFFSET_L1(va);
                if (entry)
                        *entry = pte;
                return 0;
        }

        /* L2 page table */
        ret = get_next_ptp(l2_ptp, L2, va, &l3_ptp, &pte, false, NULL);
        if (ret < 0)
                return ret;
        else if (ret == BLOCK_PTP) {
                /* 2 MiB block mapping. */
                *pa = virt_to_phys((vaddr_t)l3_ptp) + GET_VA_OFFSET_L2(va);
                if (entry)
                        *entry = pte;
                return 0;
        }

        /* L3 page table */
        ret = get_next_ptp(l3_ptp, L3, va, &phys_page, &pte, false, NULL);
        if (ret < 0)
                return ret;

        *pa = virt_to_phys((vaddr_t)phys_page) + GET_VA_OFFSET_L3(va);
        if (entry)
                *entry = pte;
        return 0;
}

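/*
 * Create 4 KiB mappings for [va, va + len): walk (and, since alloc is
 * true, build) the L0-L2 tables, then fill consecutive L3 PTEs. Block
 * (huge-page) mappings are never created here.
 */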
static int map_range_in_pgtbl_common(void *pgtbl, vaddr_t va, paddr_t pa,
                                     size_t len, vmr_prop_t flags, int kind,
                                     long *rss)
{
        s64 total_page_cnt;
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        pte_t *pte;
        int ret;
        int pte_index; /* the index of the PTE in the last level page table */
        int i;

        /* The caller should allocate the root page table page first. */
        BUG_ON(pgtbl == NULL);

        total_page_cnt = len / PAGE_SIZE + (((len % PAGE_SIZE) > 0) ? 1 : 0);

        l0_ptp = (ptp_t *)pgtbl;

        l1_ptp = NULL;
        l2_ptp = NULL;
        l3_ptp = NULL;

        while (total_page_cnt > 0) {
                /* L0 */
                ret = get_next_ptp(l0_ptp, L0, va, &l1_ptp, &pte, true, rss);
                BUG_ON(ret != 0);

                /* L1 */
                ret = get_next_ptp(l1_ptp, L1, va, &l2_ptp, &pte, true, rss);
                BUG_ON(ret != 0);

                /* L2 */
                ret = get_next_ptp(l2_ptp, L2, va, &l3_ptp, &pte, true, rss);
                BUG_ON(ret != 0);

                /* L3: fill PTEs until the end of this L3 page table, or until
                 * all requested pages are mapped; the outer loop then
                 * re-walks from L0 for the next L3 page table. */
                pte_index = GET_L3_INDEX(va);
                for (i = pte_index; i < PTP_ENTRIES; ++i) {
                        pte_t new_pte_val;

                        new_pte_val.pte = 0;
                        new_pte_val.l3_page.is_valid = 1;
                        new_pte_val.l3_page.is_page = 1;
                        new_pte_val.l3_page.pfn = pa >> PAGE_SHIFT;
                        set_pte_flags(&new_pte_val, flags, kind);
                        l3_ptp->ent[i].pte = new_pte_val.pte;

                        va += PAGE_SIZE;
                        pa += PAGE_SIZE;
                        if (rss) {
                                *rss += PAGE_SIZE;
                        }
                        total_page_cnt -= 1;
                        if (total_page_cnt == 0)
                                break;
                }
        }

        /* Since we are only adding new mappings, there is no need to flush
         * TLBs. */
        return 0;
}

/* Map a virtual memory range in the kernel's address space. */
int map_range_in_pgtbl_kernel(void *pgtbl, vaddr_t va, paddr_t pa, size_t len,
                              vmr_prop_t flags)
{
        return map_range_in_pgtbl_common(
                pgtbl, va, pa, len, flags, KERNEL_PTE, NULL);
}

/* Map a virtual memory range in a user address space. */
int map_range_in_pgtbl(void *pgtbl, vaddr_t va, paddr_t pa, size_t len,
                       vmr_prop_t flags, long *rss)
{
        return map_range_in_pgtbl_common(
                pgtbl, va, pa, len, flags, USER_PTE, rss);
}
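
/*
 * Usage sketch (hypothetical addresses; error handling and vmspace setup
 * omitted): map one physical page at a user VA, then translate it back.
 *
 *   long rss = 0;
 *   paddr_t pa;
 *   pte_t *pte;
 *   map_range_in_pgtbl(pgtbl, 0x400000, page_pa, PAGE_SIZE,
 *                      VMR_READ | VMR_WRITE, &rss);
 *   query_in_pgtbl(pgtbl, 0x400000, &pa, &pte);
 *   // Now pa == page_pa, and rss counts the mapped page plus any page
 *   // table pages allocated during the walk.
 */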
/*
 * Try to release a lower-level page table page (low_ptp).
 * @high_ptp: the higher-level page table page
 * @low_ptp: the next-level page table page
 * @index: the index of low_ptp in high_ptp's entries
 * @return:
 * - zero if the lower page table page is not empty
 * - nonzero otherwise (the page has been freed)
 */
static inline int try_release_ptp(ptp_t *high_ptp, ptp_t *low_ptp, int index,
                                  long *rss)
{
        int i;

        for (i = 0; i < PTP_ENTRIES; i++) {
                if (low_ptp->ent[i].pte != PTE_DESCRIPTOR_INVALID) {
                        return 0;
                }
        }

        BUG_ON(index < 0 || index >= PTP_ENTRIES);
        high_ptp->ent[index].pte = PTE_DESCRIPTOR_INVALID;
        kfree(low_ptp);
        if (rss) {
                *rss -= PAGE_SIZE;
        }
        return 1;
}

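/*
 * After unmapping, walk back up from the L3 page table page towards the
 * root and release every page table page that has become empty.
 */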
static void recycle_pgtable_entry(ptp_t *l0_ptp, ptp_t *l1_ptp, ptp_t *l2_ptp,
                                  ptp_t *l3_ptp, vaddr_t va, long *rss)
{
        if (!try_release_ptp(l2_ptp, l3_ptp, GET_L2_INDEX(va), rss))
                return;

        if (!try_release_ptp(l1_ptp, l2_ptp, GET_L1_INDEX(va), rss))
                return;

        try_release_ptp(l0_ptp, l1_ptp, GET_L0_INDEX(va), rss);
}

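/*
 * Unmap [va, va + len): clear the L3 PTEs, skipping holes by the number
 * of pages left in the missing entry, and recycle page table pages that
 * become empty.
 */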
int unmap_range_in_pgtbl(void *pgtbl, vaddr_t va, size_t len, long *rss)
{
        s64 total_page_cnt; /* must be signed */
        s64 left_page_cnt_in_current_level;
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        pte_t *pte;
        vaddr_t old_va;

        int ret;
        int pte_index; /* the index of the PTE in the last level page table */
        int i;

        BUG_ON(pgtbl == NULL);

        l0_ptp = (ptp_t *)pgtbl;

#ifdef CHCORE_KERNEL_DEBUG
        BUG_ON(va % PAGE_SIZE != 0);
#endif

        total_page_cnt = len / PAGE_SIZE + (((len % PAGE_SIZE) > 0) ? 1 : 0);
        while (total_page_cnt > 0) {
                old_va = va;
                /* L0 */
                ret = get_next_ptp(l0_ptp, L0, va, &l1_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        /* Skip the pages covered by the rest of this L0
                         * entry. */
                        left_page_cnt_in_current_level =
                                (L0_PER_ENTRY_PAGES
                                 - GET_L1_INDEX(va) * L1_PER_ENTRY_PAGES);
                        total_page_cnt -=
                                (left_page_cnt_in_current_level > total_page_cnt ?
                                         total_page_cnt :
                                         left_page_cnt_in_current_level);
                        va += left_page_cnt_in_current_level * PAGE_SIZE;
                        continue;
                }

                /* L1 */
                ret = get_next_ptp(l1_ptp, L1, va, &l2_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        left_page_cnt_in_current_level =
                                (L1_PER_ENTRY_PAGES
                                 - GET_L2_INDEX(va) * L2_PER_ENTRY_PAGES);
                        total_page_cnt -=
                                (left_page_cnt_in_current_level > total_page_cnt ?
                                         total_page_cnt :
                                         left_page_cnt_in_current_level);
                        va += left_page_cnt_in_current_level * PAGE_SIZE;
                        continue;
                }

                /* L2 */
                ret = get_next_ptp(l2_ptp, L2, va, &l3_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        left_page_cnt_in_current_level =
                                (L2_PER_ENTRY_PAGES
                                 - GET_L3_INDEX(va) * L3_PER_ENTRY_PAGES);
                        total_page_cnt -=
                                (left_page_cnt_in_current_level > total_page_cnt ?
                                         total_page_cnt :
                                         left_page_cnt_in_current_level);
                        va += left_page_cnt_in_current_level * PAGE_SIZE;
                        continue;
                }

                /* L3: clear PTEs until the end of this L3 page table, or
                 * until all requested pages are unmapped. */
                pte_index = GET_L3_INDEX(va);
                for (i = pte_index; i < PTP_ENTRIES; ++i) {
                        if (l3_ptp->ent[i].l3_page.is_valid && rss)
                                *rss -= PAGE_SIZE;
                        l3_ptp->ent[i].pte = PTE_DESCRIPTOR_INVALID;
                        va += PAGE_SIZE;
                        total_page_cnt -= 1;
                        if (total_page_cnt == 0)
                                break;
                }
                recycle_pgtable_entry(l0_ptp, l1_ptp, l2_ptp, l3_ptp, old_va, rss);
        }

        return 0;
}

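/* Update the permission flags of all existing mappings in [va, va + len). */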
int mprotect_in_pgtbl(void *pgtbl, vaddr_t va, size_t len, vmr_prop_t flags)
{
        s64 total_page_cnt; /* must be signed */
        ptp_t *l0_ptp, *l1_ptp, *l2_ptp, *l3_ptp;
        pte_t *pte;
        int ret;
        int pte_index; /* the index of the PTE in the last level page table */
        int i;

        BUG_ON(pgtbl == NULL);

        l0_ptp = (ptp_t *)pgtbl;

        total_page_cnt = len / PAGE_SIZE + (((len % PAGE_SIZE) > 0) ? 1 : 0);
        /*
         * Note: on a hole, a whole entry's worth of pages is skipped below.
         * This is precise only when va is aligned to that entry's boundary;
         * compare the remaining-page computation in unmap_range_in_pgtbl.
         */
        while (total_page_cnt > 0) {
                /* L0 */
                ret = get_next_ptp(l0_ptp, L0, va, &l1_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        total_page_cnt -= L0_PER_ENTRY_PAGES;
                        va += L0_PER_ENTRY_PAGES * PAGE_SIZE;
                        continue;
                }

                /* L1 */
                ret = get_next_ptp(l1_ptp, L1, va, &l2_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        total_page_cnt -= L1_PER_ENTRY_PAGES;
                        va += L1_PER_ENTRY_PAGES * PAGE_SIZE;
                        continue;
                }

                /* L2 */
                ret = get_next_ptp(l2_ptp, L2, va, &l3_ptp, &pte, false, NULL);
                if (ret == -ENOMAPPING) {
                        total_page_cnt -= L2_PER_ENTRY_PAGES;
                        va += L2_PER_ENTRY_PAGES * PAGE_SIZE;
                        continue;
                }

                /* L3: modify the permission in each PTE if it exists. */
                pte_index = GET_L3_INDEX(va);
                for (i = pte_index; i < PTP_ENTRIES; ++i) {
                        if (!IS_PTE_INVALID(l3_ptp->ent[i].pte))
                                set_pte_flags(&(l3_ptp->ent[i]), flags, USER_PTE);

                        va += PAGE_SIZE;
                        total_page_cnt -= 1;
                        if (total_page_cnt == 0)
                                break;
                }
        }

        return 0;
}

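/*
 * Convert an architecture-specific L3 PTE into the arch-independent
 * struct common_pte_t (see mm/common_pte.h).
 */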
void parse_pte_to_common(pte_t *pte, unsigned int level,
                         struct common_pte_t *ret)
{
        switch (level) {
        case L3:
                ret->ppn = pte->l3_page.pfn;
                ret->perm = 0;
                ret->_unused = 0;
                ret->perm |= (pte->l3_page.UXN ? 0 : VMR_EXEC);
                ret->perm |= __ap_to_vmr_prot(pte->l3_page.AP);

                ret->perm |= (pte->l3_page.attr_index == DEVICE_MEMORY ?
                                      VMR_DEVICE :
                                      0);

                ret->access = pte->l3_page.AF;
                ret->dirty = pte->l3_page.DBM;
                ret->valid = pte->l3_page.is_valid;
                break;
        default:
                BUG("Parsing upper-level PTEs is not supported now!\n");
        }
}

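/*
 * Write the fields of an arch-independent common_pte_t back into an
 * architecture-specific L3 PTE.
 */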
void update_pte(pte_t *dest, unsigned int level, struct common_pte_t *src)
{
        switch (level) {
        case L3:
                dest->l3_page.pfn = src->ppn;
                dest->l3_page.AP = __vmr_prot_to_ap(src->perm);

                dest->l3_page.UXN = ((src->perm & VMR_EXEC) ?
                                             AARCH64_MMU_ATTR_PAGE_UX :
                                             AARCH64_MMU_ATTR_PAGE_UXN);

                dest->l3_page.is_valid = src->valid;
#if !(defined(CHCORE_PLAT_RASPI3) || defined(CHCORE_PLAT_RASPI4) \
      || defined(CHCORE_PLAT_RK3399) || defined(CHCORE_PLAT_RK3568))
                /*
                 * These platforms (Raspberry Pi 3/4, RK3399, RK3568) do
                 * not support hardware management of AF and DBM, so we
                 * ignore those fields there.
                 */
                dest->l3_page.AF = src->access;
                dest->l3_page.DBM = src->dirty;
#endif
                break;
        default:
                BUG("Updating upper-level PTEs is not supported now!\n");
        }
}