/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @defgroup los_arch_mmu architecture mmu
 * @ingroup kernel
 */

#include "los_arch_mmu.h"
#include "los_asid.h"
#include "los_pte_ops.h"
#include "los_tlb_v6.h"
#include "los_printf.h"
#include "los_vm_common.h"
#include "los_vm_map.h"
#include "los_vm_boot.h"
#include "los_mmu_descriptor_v6.h"
#include "los_process_pri.h"

#ifdef LOSCFG_KERNEL_MMU
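/*
 * Bundles the arguments threaded through the mapping helpers below; the
 * helpers advance *vaddr/*paddr in place as page-table entries are written.
 */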
typedef struct {
    LosArchMmu *archMmu;
    VADDR_T *vaddr;
    PADDR_T *paddr;
    UINT32 *flags;
} MmuMapInfo;

#define TRY_MAX_TIMES 10

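/*
 * Boot-time first-level translation table. On the ARMv7-A short-descriptor
 * format the L1 table holds 4096 4-byte entries (16KB) and must be 16KB
 * aligned to serve as a TTBR base, hence the alignment attribute below.
 */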
__attribute__((aligned(MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS))) \
__attribute__((section(".bss.prebss.translation_table"))) UINT8 \
g_firstPageTable[MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS];
#ifdef LOSCFG_KERNEL_SMP
__attribute__((aligned(MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS))) \
__attribute__((section(".bss.prebss.translation_table"))) UINT8 \
g_tempPageTable[MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS];
UINT8 *g_mmuJumpPageTable = g_tempPageTable;
#else
extern CHAR __mmu_ttlb_begin; /* defined in .ld script */
UINT8 *g_mmuJumpPageTable = (UINT8 *)&__mmu_ttlb_begin; /* temporary page table, used only during system power-up */
#endif

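/*
 * Take the spinlock guarding the page-table page that contains paddr. With
 * LOSCFG_PAGE_TABLE_FINE_LOCK each physical page carries its own lock (in
 * its LosVmPage); otherwise a single per-archMmu lock is used.
 */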
STATIC SPIN_LOCK_S *OsGetPteLock(LosArchMmu *archMmu, PADDR_T paddr, UINT32 *intSave)
{
    SPIN_LOCK_S *lock = NULL;
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    LosVmPage *vmPage = NULL;

    vmPage = OsVmPaddrToPage(paddr);
    if (vmPage == NULL) {
        return NULL;
    }
    lock = &vmPage->lock;
#else
    lock = &archMmu->lock;
#endif

    LOS_SpinLockSave(lock, intSave);
    return lock;
}

STATIC SPIN_LOCK_S *OsGetPte1Lock(LosArchMmu *archMmu, PADDR_T paddr, UINT32 *intSave)
{
    return OsGetPteLock(archMmu, paddr, intSave);
}

STATIC INLINE VOID OsUnlockPte1(SPIN_LOCK_S *lock, UINT32 intSave)
{
    if (lock == NULL) {
        return;
    }
    LOS_SpinUnlockRestore(lock, intSave);
}

STATIC SPIN_LOCK_S *OsGetPte1LockTmp(LosArchMmu *archMmu, PADDR_T paddr, UINT32 *intSave)
{
    SPIN_LOCK_S *spinLock = NULL;
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    spinLock = OsGetPteLock(archMmu, paddr, intSave);
#else
    (VOID)archMmu;
    (VOID)paddr;
    (VOID)intSave;
#endif
    return spinLock;
}

STATIC INLINE VOID OsUnlockPte1Tmp(SPIN_LOCK_S *lock, UINT32 intSave)
{
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    if (lock == NULL) {
        return;
    }
    LOS_SpinUnlockRestore(lock, intSave);
#else
    (VOID)lock;
    (VOID)intSave;
#endif
}

STATIC INLINE SPIN_LOCK_S *OsGetPte2Lock(LosArchMmu *archMmu, PTE_T pte1, UINT32 *intSave)
{
    PADDR_T pa = MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1);
    return OsGetPteLock(archMmu, pa, intSave);
}

STATIC INLINE VOID OsUnlockPte2(SPIN_LOCK_S *lock, UINT32 intSave)
{
    return OsUnlockPte1(lock, intSave);
}

STATIC INLINE PTE_T *OsGetPte2BasePtr(PTE_T pte1)
{
    PADDR_T pa = MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1);
    return LOS_PaddrToKVaddr(pa);
}

VADDR_T *OsGFirstTableGet(VOID)
{
    return (VADDR_T *)g_firstPageTable;
}

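/*
 * Skip over a hole in the address space: the current L1 entry is invalid, so
 * advance vaddr/count past the unmapped 4KB pages up to the next 1MB section
 * boundary. For example, vaddr = 0x00123000 with count = 1000 skips
 * (0x100000 - 0x23000) >> 12 = 221 pages.
 */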
STATIC INLINE UINT32 OsUnmapL1Invalid(vaddr_t *vaddr, UINT32 *count)
{
    UINT32 unmapCount;

    unmapCount = MIN2((MMU_DESCRIPTOR_L1_SMALL_SIZE - (*vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE)) >>
                      MMU_DESCRIPTOR_L2_SMALL_SHIFT, *count);
    *vaddr += unmapCount << MMU_DESCRIPTOR_L2_SMALL_SHIFT;
    *count -= unmapCount;

    return unmapCount;
}

STATIC INT32 OsMapParamCheck(UINT32 flags, VADDR_T vaddr, PADDR_T paddr)
{
#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & VM_MAP_REGION_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        LOS_Panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    if (!MMU_DESCRIPTOR_IS_L2_SIZE_ALIGNED(vaddr) || !MMU_DESCRIPTOR_IS_L2_SIZE_ALIGNED(paddr)) {
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    return 0;
}

STATIC VOID OsCvtPte2AttsToFlags(PTE_T l1Entry, PTE_T l2Entry, UINT32 *flags)
{
    *flags = 0;
    /* NS flag is only present on L1 entry */
    if (l1Entry & MMU_DESCRIPTOR_L1_PAGETABLE_NON_SECURE) {
        *flags |= VM_MAP_REGION_FLAG_NS;
    }

    switch (l2Entry & MMU_DESCRIPTOR_L2_TEX_TYPE_MASK) {
        case MMU_DESCRIPTOR_L2_TYPE_STRONGLY_ORDERED:
            *flags |= VM_MAP_REGION_FLAG_STRONGLY_ORDERED;
            break;
        case MMU_DESCRIPTOR_L2_TYPE_NORMAL_NOCACHE:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED;
            break;
        case MMU_DESCRIPTOR_L2_TYPE_DEVICE_SHARED:
        case MMU_DESCRIPTOR_L2_TYPE_DEVICE_NON_SHARED:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED_DEVICE;
            break;
        default:
            break;
    }

    *flags |= VM_MAP_REGION_FLAG_PERM_READ;

    switch (l2Entry & MMU_DESCRIPTOR_L2_AP_MASK) {
        case MMU_DESCRIPTOR_L2_AP_P_RO_U_NA:
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RW_U_NA:
            *flags |= VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RO_U_RO:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER;
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RW_U_RW:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        default:
            break;
    }
    if ((l2Entry & MMU_DESCRIPTOR_L2_TYPE_MASK) != MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE_XN) {
        *flags |= VM_MAP_REGION_FLAG_PERM_EXECUTE;
    }
}

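/*
 * Drop a reference to an L2 table page. Each 4KB page holds
 * MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE L2 tables, so the page may only
 * be freed once no L1 entry in the group sharing it still points to a page
 * table.
 */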
STATIC VOID OsPutL2Table(const LosArchMmu *archMmu, UINT32 l1Index, paddr_t l2Paddr)
{
    UINT32 index;
    PTE_T ttEntry;
    /* check if any l1 entry points to this l2 table */
    for (index = 0; index < MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE; index++) {
        ttEntry = archMmu->virtTtb[ROUNDDOWN(l1Index, MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) + index];
        if ((ttEntry & MMU_DESCRIPTOR_L1_TYPE_MASK) == MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE) {
            return;
        }
    }
#ifdef LOSCFG_KERNEL_VM
    /* we can free this l2 table */
    LosVmPage *vmPage = LOS_VmPageGet(l2Paddr);
    if (vmPage == NULL) {
        LOS_Panic("bad page table paddr %#x\n", l2Paddr);
        return;
    }

    LOS_ListDelete(&vmPage->node);
    LOS_PhysPageFree(vmPage);
#else
    (VOID)LOS_MemFree(OS_SYS_MEM_ADDR, LOS_PaddrToKVaddr(l2Paddr));
#endif
}

STATIC VOID OsTryUnmapL1PTE(LosArchMmu *archMmu, PTE_T *l1Entry, vaddr_t vaddr, UINT32 scanIndex, UINT32 scanCount)
{
    /*
     * Check whether all pages covered by this l1 entry have been deallocated.
     * Only the entries the caller did not already clear need checking,
     * starting from scanIndex and wrapping around the section.
     */
    UINT32 l1Index;
    PTE_T *pte2BasePtr = NULL;
    SPIN_LOCK_S *pte1Lock = NULL;
    SPIN_LOCK_S *pte2Lock = NULL;
    UINT32 pte1IntSave;
    UINT32 pte2IntSave;
    PTE_T pte1Val;
    PADDR_T pte1Paddr;

    pte1Paddr = OsGetPte1Paddr(archMmu->physTtb, vaddr);
    pte2Lock = OsGetPte2Lock(archMmu, *l1Entry, &pte2IntSave);
    if (pte2Lock == NULL) {
        return;
    }
    pte2BasePtr = OsGetPte2BasePtr(*l1Entry);
    if (pte2BasePtr == NULL) {
        OsUnlockPte2(pte2Lock, pte2IntSave);
        return;
    }

    while (scanCount) {
        if (scanIndex == MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) {
            scanIndex = 0;
        }
        if (pte2BasePtr[scanIndex++]) {
            break;
        }
        scanCount--;
    }

    if (!scanCount) {
        /*
         * The pte1 of the kernel process (kprocess) is placed in the kernel image
         * at compile time, so pte1Lock will be NULL; the kprocess pte1 is never
         * accessed concurrently.
         */
        pte1Lock = OsGetPte1LockTmp(archMmu, pte1Paddr, &pte1IntSave);
        if (!OsIsPte1PageTable(*l1Entry)) {
            OsUnlockPte1Tmp(pte1Lock, pte1IntSave);
            OsUnlockPte2(pte2Lock, pte2IntSave);
            return;
        }
        pte1Val = *l1Entry;
        /* we can kill the l1 entry */
        OsClearPte1(l1Entry);
        l1Index = OsGetPte1Index(vaddr);
        OsArmInvalidateTlbMvaNoBarrier(l1Index << MMU_DESCRIPTOR_L1_SMALL_SHIFT);

        /* try to free the l2 page itself */
        OsPutL2Table(archMmu, l1Index, MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1Val));
        OsUnlockPte1Tmp(pte1Lock, pte1IntSave);
    }
    OsUnlockPte2(pte2Lock, pte2IntSave);
}

STATIC UINT32 OsCvtSecCacheFlagsToMMUFlags(UINT32 flags)
{
    UINT32 mmuFlags = 0;

    switch (flags & VM_MAP_REGION_FLAG_CACHE_MASK) {
        case VM_MAP_REGION_FLAG_CACHED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#ifdef LOSCFG_KERNEL_SMP
            mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_SHAREABLE;
#endif
            break;
        case VM_MAP_REGION_FLAG_STRONGLY_ORDERED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_STRONGLY_ORDERED;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_NORMAL_NOCACHE;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED_DEVICE:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_DEVICE_SHARED;
            break;
        default:
            return LOS_ERRNO_VM_INVALID_ARGS;
    }
    return mmuFlags;
}

STATIC UINT32 OsCvtSecAccessFlagsToMMUFlags(UINT32 flags)
{
    UINT32 mmuFlags = 0;

    switch (flags & (VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE)) {
        case 0:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_NA_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_READ:
        case VM_MAP_REGION_FLAG_PERM_USER:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RO_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RO_U_RO;
            break;
        case VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RW_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RW_U_RW;
            break;
        default:
            break;
    }
    return mmuFlags;
}

/* convert user-level mmu flags to L1 descriptor flags */
STATIC UINT32 OsCvtSecFlagsToAttrs(UINT32 flags)
{
    UINT32 mmuFlags;

    mmuFlags = OsCvtSecCacheFlagsToMMUFlags(flags);
    if (mmuFlags == LOS_ERRNO_VM_INVALID_ARGS) {
        return mmuFlags;
    }

    mmuFlags |= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_CLIENT;

    mmuFlags |= OsCvtSecAccessFlagsToMMUFlags(flags);

    if (!(flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_XN;
    }

    if (flags & VM_MAP_REGION_FLAG_NS) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_NON_SECURE;
    }

    if (flags & VM_MAP_REGION_FLAG_PERM_USER) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_NON_GLOBAL;
    }

    return mmuFlags;
}

STATIC VOID OsCvtSecAttsToFlags(PTE_T l1Entry, UINT32 *flags)
{
    *flags = 0;
    if (l1Entry & MMU_DESCRIPTOR_L1_SECTION_NON_SECURE) {
        *flags |= VM_MAP_REGION_FLAG_NS;
    }

    switch (l1Entry & MMU_DESCRIPTOR_L1_TEX_TYPE_MASK) {
        case MMU_DESCRIPTOR_L1_TYPE_STRONGLY_ORDERED:
            *flags |= VM_MAP_REGION_FLAG_STRONGLY_ORDERED;
            break;
        case MMU_DESCRIPTOR_L1_TYPE_NORMAL_NOCACHE:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED;
            break;
        case MMU_DESCRIPTOR_L1_TYPE_DEVICE_SHARED:
        case MMU_DESCRIPTOR_L1_TYPE_DEVICE_NON_SHARED:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED_DEVICE;
            break;
        default:
            break;
    }

    *flags |= VM_MAP_REGION_FLAG_PERM_READ;

    switch (l1Entry & MMU_DESCRIPTOR_L1_AP_MASK) {
        case MMU_DESCRIPTOR_L1_AP_P_RO_U_NA:
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RW_U_NA:
            *flags |= VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RO_U_RO:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER;
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RW_U_RW:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        default:
            break;
    }

    if (!(l1Entry & MMU_DESCRIPTOR_L1_SECTION_XN)) {
        *flags |= VM_MAP_REGION_FLAG_PERM_EXECUTE;
    }
}

STATIC UINT32 OsUnmapL2PTE(LosArchMmu *archMmu, PTE_T *pte1, vaddr_t vaddr, UINT32 *count)
{
    UINT32 unmapCount;
    UINT32 pte2Index;
    UINT32 intSave;
    PTE_T *pte2BasePtr = NULL;
    SPIN_LOCK_S *lock = NULL;

    pte2Index = OsGetPte2Index(vaddr);
    unmapCount = MIN2(MMU_DESCRIPTOR_L2_NUMBERS_PER_L1 - pte2Index, *count);

    lock = OsGetPte2Lock(archMmu, *pte1, &intSave);
    if (lock == NULL) {
        return unmapCount;
    }

    pte2BasePtr = OsGetPte2BasePtr(*pte1);
    if (pte2BasePtr == NULL) {
        OsUnlockPte2(lock, intSave);
        return unmapCount;
    }

    /* unmap the page run */
    OsClearPte2Continuous(&pte2BasePtr[pte2Index], unmapCount);

    /* invalidate tlb */
    OsArmInvalidateTlbMvaRangeNoBarrier(vaddr, unmapCount);
    OsUnlockPte2(lock, intSave);

    *count -= unmapCount;
    return unmapCount;
}

STATIC UINT32 OsUnmapSection(LosArchMmu *archMmu, PTE_T *l1Entry, vaddr_t *vaddr, UINT32 *count)
{
    UINT32 intSave;
    PADDR_T pte1Paddr;
    SPIN_LOCK_S *lock = NULL;

    pte1Paddr = OsGetPte1Paddr(archMmu->physTtb, *vaddr);
    lock = OsGetPte1Lock(archMmu, pte1Paddr, &intSave);
    if (!OsIsPte1Section(*l1Entry)) {
        OsUnlockPte1(lock, intSave);
        return 0;
    }
    OsClearPte1(OsGetPte1Ptr((PTE_T *)archMmu->virtTtb, *vaddr));
    OsArmInvalidateTlbMvaNoBarrier(*vaddr);
    OsUnlockPte1(lock, intSave);

    *vaddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;
    *count -= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;

    return MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
}

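/*
 * Initialize a per-address-space MMU context: allocate an ASID (when the VM
 * subsystem is enabled), set up the page-table list and lock, and derive the
 * physical TTB address from the kernel-virtual one. The subtraction assumes
 * the TTB lies in the kernel's linearly mapped region.
 */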
BOOL OsArchMmuInit(LosArchMmu *archMmu, VADDR_T *virtTtb)
{
#ifdef LOSCFG_KERNEL_VM
    if (OsAllocAsid(&archMmu->asid) != LOS_OK) {
        VM_ERR("alloc arch mmu asid failed");
        return FALSE;
    }
#endif

#ifndef LOSCFG_PAGE_TABLE_FINE_LOCK
    LOS_SpinInit(&archMmu->lock);
#endif
    LOS_ListInit(&archMmu->ptList);
    archMmu->virtTtb = virtTtb;
    archMmu->physTtb = (VADDR_T)(UINTPTR)virtTtb - KERNEL_ASPACE_BASE + SYS_MEM_BASE;
    return TRUE;
}

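/*
 * Translate vaddr by walking the page tables. Either out-pointer may be NULL
 * if not needed. A minimal usage sketch (archMmu is assumed to be an
 * initialized address-space context):
 *
 *     PADDR_T pa;
 *     UINT32 mapFlags;
 *     if (LOS_ArchMmuQuery(archMmu, vaddr, &pa, &mapFlags) == LOS_OK) {
 *         // vaddr is mapped at pa with mapFlags permissions
 *     }
 */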
STATUS_T LOS_ArchMmuQuery(const LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T *paddr, UINT32 *flags)
{
    PTE_T l1Entry = OsGetPte1(archMmu->virtTtb, vaddr);
    PTE_T l2Entry;
    PTE_T *l2Base = NULL;

    if (OsIsPte1Invalid(l1Entry)) {
        return LOS_ERRNO_VM_NOT_FOUND;
    } else if (OsIsPte1Section(l1Entry)) {
        if (paddr != NULL) {
            *paddr = MMU_DESCRIPTOR_L1_SECTION_ADDR(l1Entry) + (vaddr & (MMU_DESCRIPTOR_L1_SMALL_SIZE - 1));
        }

        if (flags != NULL) {
            OsCvtSecAttsToFlags(l1Entry, flags);
        }
    } else if (OsIsPte1PageTable(l1Entry)) {
        l2Base = OsGetPte2BasePtr(l1Entry);
        if (l2Base == NULL) {
            return LOS_ERRNO_VM_NOT_FOUND;
        }
        l2Entry = OsGetPte2(l2Base, vaddr);
        if (OsIsPte2SmallPage(l2Entry) || OsIsPte2SmallPageXN(l2Entry)) {
            if (paddr != NULL) {
                *paddr = MMU_DESCRIPTOR_L2_SMALL_PAGE_ADDR(l2Entry) + (vaddr & (MMU_DESCRIPTOR_L2_SMALL_SIZE - 1));
            }

            if (flags != NULL) {
                OsCvtPte2AttsToFlags(l1Entry, l2Entry, flags);
            }
        } else if (OsIsPte2LargePage(l2Entry)) {
            LOS_Panic("%s %d, large page unimplemented\n", __FUNCTION__, __LINE__);
        } else {
            return LOS_ERRNO_VM_NOT_FOUND;
        }
    }

    return LOS_OK;
}

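/*
 * Unmap count 4KB pages starting at vaddr. Note the return value is the
 * number of pages actually unmapped (or LOS_ERRNO_VM_FAULT if no progress is
 * made TRY_MAX_TIMES in a row), not a plain status code.
 */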
STATUS_T LOS_ArchMmuUnmap(LosArchMmu *archMmu, VADDR_T vaddr, size_t count)
{
    PTE_T *l1Entry = NULL;
    INT32 unmapped = 0;
    UINT32 unmapCount = 0;
    INT32 tryTime = TRY_MAX_TIMES;

    while (count > 0) {
        l1Entry = OsGetPte1Ptr(archMmu->virtTtb, vaddr);
        if (OsIsPte1Invalid(*l1Entry)) {
            unmapCount = OsUnmapL1Invalid(&vaddr, &count);
        } else if (OsIsPte1Section(*l1Entry)) {
            if (MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(vaddr) && count >= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) {
                unmapCount = OsUnmapSection(archMmu, l1Entry, &vaddr, &count);
            } else {
                LOS_Panic("%s %d, unimplemented\n", __FUNCTION__, __LINE__);
            }
        } else if (OsIsPte1PageTable(*l1Entry)) {
            unmapCount = OsUnmapL2PTE(archMmu, l1Entry, vaddr, &count);
            OsTryUnmapL1PTE(archMmu, l1Entry, vaddr, OsGetPte2Index(vaddr) + unmapCount,
                            MMU_DESCRIPTOR_L2_NUMBERS_PER_L1);
            vaddr += unmapCount << MMU_DESCRIPTOR_L2_SMALL_SHIFT;
        } else {
            LOS_Panic("%s %d, unimplemented\n", __FUNCTION__, __LINE__);
        }
        tryTime = (unmapCount == 0) ? (tryTime - 1) : tryTime;
        if (tryTime == 0) {
            return LOS_ERRNO_VM_FAULT;
        }
        unmapped += unmapCount;
    }
    OsArmInvalidateTlbBarrier();
    return unmapped;
}

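/*
 * Map one full 1MB section with a single L1 descriptor. The caller
 * (LOS_ArchMmuMap) has already verified that vaddr/paddr are section-aligned
 * and that at least MMU_DESCRIPTOR_L2_NUMBERS_PER_L1 pages remain.
 */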
STATIC UINT32 OsMapSection(MmuMapInfo *mmuMapInfo, UINT32 *count)
{
    UINT32 mmuFlags = 0;
    UINT32 intSave;
    PADDR_T pte1Paddr;
    SPIN_LOCK_S *lock = NULL;

    mmuFlags |= OsCvtSecFlagsToAttrs(*mmuMapInfo->flags);
    pte1Paddr = OsGetPte1Paddr(mmuMapInfo->archMmu->physTtb, *mmuMapInfo->vaddr);
    lock = OsGetPte1Lock(mmuMapInfo->archMmu, pte1Paddr, &intSave);
    OsSavePte1(OsGetPte1Ptr(mmuMapInfo->archMmu->virtTtb, *mmuMapInfo->vaddr),
               OsTruncPte1(*mmuMapInfo->paddr) | mmuFlags | MMU_DESCRIPTOR_L1_TYPE_SECTION);
    OsUnlockPte1(lock, intSave);
    *count -= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
    *mmuMapInfo->vaddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;
    *mmuMapInfo->paddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;

    return MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
}

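/*
 * Find or allocate the L2 table backing the given L1 index. L2 tables are
 * carved out of 4KB pages in MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE
 * slots; l2Offset selects this index's slot within the shared page.
 */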
STATIC STATUS_T OsGetL2Table(LosArchMmu *archMmu, UINT32 l1Index, paddr_t *ppa)
{
    UINT32 index;
    PTE_T ttEntry;
    VADDR_T *kvaddr = NULL;
    UINT32 l2Offset = (MMU_DESCRIPTOR_L2_SMALL_SIZE / MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) *
        (l1Index & (MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE - 1));
    /* look up an existing l2 page table */
    for (index = 0; index < MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE; index++) {
        ttEntry = archMmu->virtTtb[ROUNDDOWN(l1Index, MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) + index];
        if ((ttEntry & MMU_DESCRIPTOR_L1_TYPE_MASK) == MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE) {
            *ppa = (PADDR_T)ROUNDDOWN(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry), MMU_DESCRIPTOR_L2_SMALL_SIZE) +
                l2Offset;
            return LOS_OK;
        }
    }

#ifdef LOSCFG_KERNEL_VM
    /* not found: allocate one (paddr) */
    LosVmPage *vmPage = LOS_PhysPageAlloc();
    if (vmPage == NULL) {
        VM_ERR("have no memory to save l2 page");
        return LOS_ERRNO_VM_NO_MEMORY;
    }
    LOS_ListAdd(&archMmu->ptList, &vmPage->node);
    kvaddr = OsVmPageToVaddr(vmPage);
#else
    kvaddr = LOS_MemAlloc(OS_SYS_MEM_ADDR, MMU_DESCRIPTOR_L2_SMALL_SIZE);
    if (kvaddr == NULL) {
        VM_ERR("have no memory to save l2 page");
        return LOS_ERRNO_VM_NO_MEMORY;
    }
#endif
    (VOID)memset_s(kvaddr, MMU_DESCRIPTOR_L2_SMALL_SIZE, 0, MMU_DESCRIPTOR_L2_SMALL_SIZE);

    /* get the physical address */
    *ppa = OsKVaddrToPaddr((VADDR_T)kvaddr) + l2Offset;
    return LOS_OK;
}

STATIC UINT32 OsCvtPte2CacheFlagsToMMUFlags(UINT32 flags)
{
    UINT32 mmuFlags = 0;

    switch (flags & VM_MAP_REGION_FLAG_CACHE_MASK) {
        case VM_MAP_REGION_FLAG_CACHED:
#ifdef LOSCFG_KERNEL_SMP
            mmuFlags |= MMU_DESCRIPTOR_L2_SHAREABLE;
#endif
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
            break;
        case VM_MAP_REGION_FLAG_STRONGLY_ORDERED:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_STRONGLY_ORDERED;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_NORMAL_NOCACHE;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED_DEVICE:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_DEVICE_SHARED;
            break;
        default:
            return LOS_ERRNO_VM_INVALID_ARGS;
    }
    return mmuFlags;
}

STATIC UINT32 OsCvtPte2AccessFlagsToMMUFlags(UINT32 flags)
{
    UINT32 mmuFlags = 0;

    switch (flags & (VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE)) {
        case 0:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_NA_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_READ:
        case VM_MAP_REGION_FLAG_PERM_USER:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RO_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RO_U_RO;
            break;
        case VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RW_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RW_U_RW;
            break;
        default:
            break;
    }
    return mmuFlags;
}

/* convert user-level mmu flags to L2 descriptor flags */
STATIC UINT32 OsCvtPte2FlagsToAttrs(UINT32 flags)
{
    UINT32 mmuFlags;

    mmuFlags = OsCvtPte2CacheFlagsToMMUFlags(flags);
    if (mmuFlags == LOS_ERRNO_VM_INVALID_ARGS) {
        return mmuFlags;
    }

    mmuFlags |= OsCvtPte2AccessFlagsToMMUFlags(flags);

    if (!(flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
        mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE_XN;
    } else {
        mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE;
    }

    if (flags & VM_MAP_REGION_FLAG_PERM_USER) {
        mmuFlags |= MMU_DESCRIPTOR_L2_NON_GLOBAL;
    }

    return mmuFlags;
}

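/*
 * Populate an invalid L1 entry: allocate (or share) an L2 table, install the
 * L1 page-table descriptor, then fill as many contiguous L2 entries as
 * possible under the pte2 lock.
 */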
STATIC UINT32 OsMapL1PTE(MmuMapInfo *mmuMapInfo, PTE_T *l1Entry, UINT32 *count)
{
    PADDR_T pte2Base = 0;
    PADDR_T pte1Paddr;
    SPIN_LOCK_S *pte1Lock = NULL;
    SPIN_LOCK_S *pte2Lock = NULL;
    PTE_T *pte2BasePtr = NULL;
    UINT32 saveCounts, archFlags, pte1IntSave, pte2IntSave;

    pte1Paddr = OsGetPte1Paddr(mmuMapInfo->archMmu->physTtb, *mmuMapInfo->vaddr);
    pte1Lock = OsGetPte1Lock(mmuMapInfo->archMmu, pte1Paddr, &pte1IntSave);
    if (!OsIsPte1Invalid(*l1Entry)) {
        OsUnlockPte1(pte1Lock, pte1IntSave);
        return 0;
    }
    if (OsGetL2Table(mmuMapInfo->archMmu, OsGetPte1Index(*mmuMapInfo->vaddr), &pte2Base) != LOS_OK) {
        LOS_Panic("%s %d, failed to allocate pagetable\n", __FUNCTION__, __LINE__);
    }

    *l1Entry = pte2Base | MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE;
    if (*mmuMapInfo->flags & VM_MAP_REGION_FLAG_NS) {
        *l1Entry |= MMU_DESCRIPTOR_L1_PAGETABLE_NON_SECURE;
    }
    *l1Entry &= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_MASK;
    *l1Entry |= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_CLIENT; // use client AP
    OsSavePte1(OsGetPte1Ptr(mmuMapInfo->archMmu->virtTtb, *mmuMapInfo->vaddr), *l1Entry);
    OsUnlockPte1(pte1Lock, pte1IntSave);

    pte2Lock = OsGetPte2Lock(mmuMapInfo->archMmu, *l1Entry, &pte2IntSave);
    if (pte2Lock == NULL) {
        LOS_Panic("pte2 should not be null!\n");
    }
    pte2BasePtr = (PTE_T *)LOS_PaddrToKVaddr(pte2Base);

    /* compute the arch flags for L2 4K pages */
    archFlags = OsCvtPte2FlagsToAttrs(*mmuMapInfo->flags);
    saveCounts = OsSavePte2Continuous(pte2BasePtr, OsGetPte2Index(*mmuMapInfo->vaddr), *mmuMapInfo->paddr | archFlags,
                                      *count);
    OsUnlockPte2(pte2Lock, pte2IntSave);
    *mmuMapInfo->paddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *mmuMapInfo->vaddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *count -= saveCounts;
    return saveCounts;
}

STATIC UINT32 OsMapL2PageContinous(MmuMapInfo *mmuMapInfo, PTE_T *pte1, UINT32 *count)
{
    PTE_T *pte2BasePtr = NULL;
    UINT32 archFlags;
    UINT32 saveCounts;
    UINT32 intSave;
    SPIN_LOCK_S *lock = NULL;

    lock = OsGetPte2Lock(mmuMapInfo->archMmu, *pte1, &intSave);
    if (lock == NULL) {
        return 0;
    }
    pte2BasePtr = OsGetPte2BasePtr(*pte1);
    if (pte2BasePtr == NULL) {
        OsUnlockPte2(lock, intSave);
        return 0;
    }

    /* compute the arch flags for L2 4K pages */
    archFlags = OsCvtPte2FlagsToAttrs(*mmuMapInfo->flags);
    saveCounts = OsSavePte2Continuous(pte2BasePtr, OsGetPte2Index(*mmuMapInfo->vaddr), *mmuMapInfo->paddr | archFlags,
                                      *count);
    OsUnlockPte2(lock, intSave);
    *mmuMapInfo->paddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *mmuMapInfo->vaddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *count -= saveCounts;
    return saveCounts;
}

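/*
 * Map count 4KB pages from paddr to vaddr with the given flags, using 1MB
 * sections where alignment and the remaining count allow it. Returns the
 * number of pages mapped. A minimal usage sketch, assuming a 4KB-aligned
 * va/pa pair obtained elsewhere:
 *
 *     status_t mapped = LOS_ArchMmuMap(archMmu, va, pa, 1,
 *                                      VM_MAP_REGION_FLAG_PERM_READ |
 *                                      VM_MAP_REGION_FLAG_PERM_WRITE);
 *     if (mapped != 1) {
 *         // handle the mapping failure
 *     }
 */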
status_t LOS_ArchMmuMap(LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T paddr, size_t count, UINT32 flags)
{
    PTE_T *l1Entry = NULL;
    UINT32 saveCounts = 0;
    INT32 mapped = 0;
    INT32 tryTime = TRY_MAX_TIMES;
    INT32 checkRst;
    MmuMapInfo mmuMapInfo = {
        .archMmu = archMmu,
        .vaddr = &vaddr,
        .paddr = &paddr,
        .flags = &flags,
    };

    checkRst = OsMapParamCheck(flags, vaddr, paddr);
    if (checkRst < 0) {
        return checkRst;
    }

    /* see what kind of mapping we can use */
    while (count > 0) {
        if (MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(*mmuMapInfo.vaddr) &&
            MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(*mmuMapInfo.paddr) &&
            count >= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) {
            /* compute the arch flags for L1 sections: cache, r/w/x, domain and type */
            saveCounts = OsMapSection(&mmuMapInfo, &count);
        } else {
            /* have to use an L2 mapping; only a 4KB L1 table is allocated, covering 0 ~ 1GB */
            l1Entry = OsGetPte1Ptr(archMmu->virtTtb, *mmuMapInfo.vaddr);
            if (OsIsPte1Invalid(*l1Entry)) {
                saveCounts = OsMapL1PTE(&mmuMapInfo, l1Entry, &count);
            } else if (OsIsPte1PageTable(*l1Entry)) {
                saveCounts = OsMapL2PageContinous(&mmuMapInfo, l1Entry, &count);
            } else {
                LOS_Panic("%s %d, unimplemented tt_entry %x\n", __FUNCTION__, __LINE__, l1Entry);
            }
        }
        mapped += saveCounts;
        tryTime = (saveCounts == 0) ? (tryTime - 1) : tryTime;
        if (tryTime == 0) {
            return LOS_ERRNO_VM_TIMED_OUT;
        }
    }

    return mapped;
}

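/*
 * Change the protection flags of count pages by querying each page's current
 * physical address, unmapping it, and re-mapping it with the new flags;
 * pages that are not currently mapped are skipped.
 */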
STATUS_T LOS_ArchMmuChangeProt(LosArchMmu *archMmu, VADDR_T vaddr, size_t count, UINT32 flags)
{
    STATUS_T status;
    PADDR_T paddr = 0;

    if ((archMmu == NULL) || (vaddr == 0) || (count == 0)) {
        VM_ERR("invalid args: archMmu %p, vaddr %p, count %d", archMmu, vaddr, count);
        return LOS_NOK;
    }

    while (count > 0) {
        count--;
        status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);
        if (status != LOS_OK) {
            vaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
            continue;
        }

        status = LOS_ArchMmuUnmap(archMmu, vaddr, 1);
        if (status < 0) {
            VM_ERR("invalid args:aspace %p, vaddr %p, count %d", archMmu, vaddr, count);
            return LOS_NOK;
        }

        status = LOS_ArchMmuMap(archMmu, vaddr, paddr, 1, flags);
        if (status < 0) {
            VM_ERR("invalid args:aspace %p, vaddr %p, count %d",
                   archMmu, vaddr, count);
            return LOS_NOK;
        }
        vaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
    }
    return LOS_OK;
}

STATUS_T LOS_ArchMmuMove(LosArchMmu *archMmu, VADDR_T oldVaddr, VADDR_T newVaddr, size_t count, UINT32 flags)
{
    STATUS_T status;
    PADDR_T paddr = 0;

    if ((archMmu == NULL) || (oldVaddr == 0) || (newVaddr == 0) || (count == 0)) {
        VM_ERR("invalid args: archMmu %p, oldVaddr %p, newVaddr %p, count %d",
               archMmu, oldVaddr, newVaddr, count);
        return LOS_NOK;
    }

    while (count > 0) {
        count--;
        status = LOS_ArchMmuQuery(archMmu, oldVaddr, &paddr, NULL);
        if (status != LOS_OK) {
            oldVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
            newVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
            continue;
        }
        // clear the mapping here but retain the physical page
        status = LOS_ArchMmuUnmap(archMmu, oldVaddr, 1);
        if (status < 0) {
            VM_ERR("invalid args: archMmu %p, vaddr %p, count %d",
                   archMmu, oldVaddr, count);
            return LOS_NOK;
        }

        status = LOS_ArchMmuMap(archMmu, newVaddr, paddr, 1, flags);
        if (status < 0) {
            VM_ERR("invalid args:archMmu %p, old_vaddr %p, new_addr %p, count %d",
                   archMmu, oldVaddr, newVaddr, count);
            return LOS_NOK;
        }
        oldVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
        newVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
    }

    return LOS_OK;
}

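/*
 * Switch the active user address space. Following the ARMv7-A recommended
 * sequence, CONTEXTIDR (ASID) and TTBR0 are changed in separate steps with
 * ISBs in between, going through the kernel ASID first so there is no window
 * in which the old ASID is paired with the new translation table.
 */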
VOID LOS_ArchMmuContextSwitch(LosArchMmu *archMmu)
{
    UINT32 ttbr;
    UINT32 ttbcr = OsArmReadTtbcr();
    if (archMmu) {
        ttbr = MMU_TTBRx_FLAGS | (archMmu->physTtb);
        /* enable TTBR0 */
        ttbcr &= ~MMU_DESCRIPTOR_TTBCR_PD0;
    } else {
        ttbr = 0;
        /* disable TTBR0 */
        ttbcr |= MMU_DESCRIPTOR_TTBCR_PD0;
    }

#ifdef LOSCFG_KERNEL_VM
    /* per the ARMv7-A ARM, B3.10.4, changes of the ASID and TTBR must be synchronized */
    OsArmWriteContextidr(LOS_GetKVmSpace()->archMmu.asid);
    ISB;
#endif
    OsArmWriteTtbr0(ttbr);
    ISB;
    OsArmWriteTtbcr(ttbcr);
    ISB;
#ifdef LOSCFG_KERNEL_VM
    if (archMmu) {
        OsArmWriteContextidr(archMmu->asid);
        ISB;
    }
#endif
}

STATUS_T LOS_ArchMmuDestroy(LosArchMmu *archMmu)
{
#ifdef LOSCFG_KERNEL_VM
    LosVmPage *page = NULL;
    /* free all of the pages allocated in archMmu->ptList */
    while ((page = LOS_ListRemoveHeadType(&archMmu->ptList, LosVmPage, node)) != NULL) {
        LOS_PhysPageFree(page);
    }

    OsArmWriteTlbiasidis(archMmu->asid);
    OsFreeAsid(archMmu->asid);
#endif
    return LOS_OK;
}

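/*
 * Switch the boot CPU onto a temporary copy of the first-level table so that
 * g_firstPageTable can be rewritten safely while the MMU stays enabled.
 */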
STATIC VOID OsSwitchTmpTTB(VOID)
{
    PTE_T *tmpTtbase = NULL;
    errno_t err;
    LosVmSpace *kSpace = LOS_GetKVmSpace();

    /* the TTBR address must be 16KB aligned */
    tmpTtbase = LOS_MemAllocAlign(m_aucSysMem0, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS,
                                  MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS);
    if (tmpTtbase == NULL) {
        VM_ERR("memory alloc failed");
        return;
    }

    kSpace->archMmu.virtTtb = tmpTtbase;
    err = memcpy_s(kSpace->archMmu.virtTtb, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS,
                   g_firstPageTable, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS);
    if (err != EOK) {
        (VOID)LOS_MemFree(m_aucSysMem0, tmpTtbase);
        kSpace->archMmu.virtTtb = (VADDR_T *)g_firstPageTable;
        VM_ERR("memcpy failed, errno: %d", err);
        return;
    }
    kSpace->archMmu.physTtb = LOS_PaddrQuery(kSpace->archMmu.virtTtb);
    OsArmWriteTtbr0(kSpace->archMmu.physTtb | MMU_TTBRx_FLAGS);
    ISB;
}

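/*
 * Rebuild the kernel image mappings for one alias (the cached one at
 * KERNEL_VMM_BASE or the uncached one) with tightened per-region attributes:
 * text read-execute, rodata read-only, data/bss read-write, and the
 * remaining memory as the read-write heap region.
 */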
STATIC VOID OsSetKSectionAttr(UINTPTR virtAddr, BOOL uncached)
{
    UINT32 offset = virtAddr - KERNEL_VMM_BASE;
    /* every section should be page aligned */
    UINTPTR textStart = (UINTPTR)&__text_start + offset;
    UINTPTR textEnd = (UINTPTR)&__text_end + offset;
    UINTPTR rodataStart = (UINTPTR)&__rodata_start + offset;
    UINTPTR rodataEnd = (UINTPTR)&__rodata_end + offset;
    UINTPTR ramDataStart = (UINTPTR)&__ram_data_start + offset;
    UINTPTR bssEnd = (UINTPTR)&__bss_end + offset;
    UINT32 bssEndBoundary = ROUNDUP(bssEnd, MB);
    LosArchMmuInitMapping mmuKernelMappings[] = {
        {
            .phys = SYS_MEM_BASE + textStart - virtAddr,
            .virt = textStart,
            .size = ROUNDUP(textEnd - textStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_EXECUTE,
            .name = "kernel_text"
        },
        {
            .phys = SYS_MEM_BASE + rodataStart - virtAddr,
            .virt = rodataStart,
            .size = ROUNDUP(rodataEnd - rodataStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ,
            .name = "kernel_rodata"
        },
        {
            .phys = SYS_MEM_BASE + ramDataStart - virtAddr,
            .virt = ramDataStart,
            .size = ROUNDUP(bssEndBoundary - ramDataStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE,
            .name = "kernel_data_bss"
        }
    };
    LosVmSpace *kSpace = LOS_GetKVmSpace();
    status_t status;
    UINT32 length;
    INT32 i;
    LosArchMmuInitMapping *kernelMap = NULL;
    UINT32 kmallocLength;
    UINT32 flags;

    /* replace the section mappings with second-level mappings, default READ and WRITE */
    kSpace->archMmu.virtTtb = (PTE_T *)g_firstPageTable;
    kSpace->archMmu.physTtb = LOS_PaddrQuery(kSpace->archMmu.virtTtb);
    status = LOS_ArchMmuUnmap(&kSpace->archMmu, virtAddr,
                              (bssEndBoundary - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    if (status != ((bssEndBoundary - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("unmap failed, status: %d", status);
        return;
    }

    flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE | VM_MAP_REGION_FLAG_PERM_EXECUTE;
    if (uncached) {
        flags |= VM_MAP_REGION_FLAG_UNCACHED;
    }
    status = LOS_ArchMmuMap(&kSpace->archMmu, virtAddr, SYS_MEM_BASE,
                            (textStart - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT,
                            flags);
    if (status != ((textStart - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("mmap failed, status: %d", status);
        return;
    }

    length = sizeof(mmuKernelMappings) / sizeof(LosArchMmuInitMapping);
    for (i = 0; i < length; i++) {
        kernelMap = &mmuKernelMappings[i];
        if (uncached) {
            kernelMap->flags |= VM_MAP_REGION_FLAG_UNCACHED;
        }
        status = LOS_ArchMmuMap(&kSpace->archMmu, kernelMap->virt, kernelMap->phys,
                                kernelMap->size >> MMU_DESCRIPTOR_L2_SMALL_SHIFT, kernelMap->flags);
        if (status != (kernelMap->size >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
            VM_ERR("mmap failed, status: %d", status);
            return;
        }
        LOS_VmSpaceReserve(kSpace, kernelMap->size, kernelMap->virt);
    }

    kmallocLength = virtAddr + SYS_MEM_SIZE_DEFAULT - bssEndBoundary;
    flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE;
    if (uncached) {
        flags |= VM_MAP_REGION_FLAG_UNCACHED;
    }
    status = LOS_ArchMmuMap(&kSpace->archMmu, bssEndBoundary,
                            SYS_MEM_BASE + bssEndBoundary - virtAddr,
                            kmallocLength >> MMU_DESCRIPTOR_L2_SMALL_SHIFT,
                            flags);
    if (status != (kmallocLength >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("mmap failed, status: %d", status);
        return;
    }
    LOS_VmSpaceReserve(kSpace, kmallocLength, bssEndBoundary);
}

STATIC VOID OsKSectionNewAttrEnable(VOID)
{
    LosVmSpace *kSpace = LOS_GetKVmSpace();
    paddr_t oldTtPhyBase;

    kSpace->archMmu.virtTtb = (PTE_T *)g_firstPageTable;
    kSpace->archMmu.physTtb = LOS_PaddrQuery(kSpace->archMmu.virtTtb);

    /* free the temporary ttbase */
    oldTtPhyBase = OsArmReadTtbr0();
    oldTtPhyBase = oldTtPhyBase & MMU_DESCRIPTOR_L2_SMALL_FRAME;
    OsArmWriteTtbr0(kSpace->archMmu.physTtb | MMU_TTBRx_FLAGS);
    ISB;

    /* the page table entries were changed, so clean the TLB here */
    OsCleanTLB();

    (VOID)LOS_MemFree(m_aucSysMem0, (VOID *)(UINTPTR)(oldTtPhyBase - SYS_MEM_BASE + KERNEL_VMM_BASE));
}

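/*
 * TTBCR.N selects the TTBR0/TTBR1 split: TTBR0 covers the low 2^(32 - N)
 * bytes and TTBR1 the rest. As an illustration, if KERNEL_ASPACE_BASE were
 * 0x40000000, then N = clz(0x40000000) + 1 = 2, giving TTBR0 the low 1GB for
 * user mappings (the actual value comes from the platform configuration).
 */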
/* disable TTBR0 walks and set the split between TTBR0 and TTBR1 */
VOID OsArchMmuInitPerCPU(VOID)
{
    UINT32 n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
    UINT32 ttbcr = MMU_DESCRIPTOR_TTBCR_PD0 | n;

    OsArmWriteTtbr1(OsArmReadTtbr0());
    ISB;
    OsArmWriteTtbcr(ttbcr);
    ISB;
    OsArmWriteTtbr0(0);
    ISB;
}

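/*
 * Boot-time remapping sequence: flush stale TLB entries, move onto a
 * temporary translation table, rebuild the cached and uncached kernel
 * aliases with proper attributes, then switch back to the rebuilt first
 * page table and free the temporary one.
 */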
VOID OsInitMappingStartUp(VOID)
{
    OsArmInvalidateTlbBarrier();

    OsSwitchTmpTTB();

    OsSetKSectionAttr(KERNEL_VMM_BASE, FALSE);
    OsSetKSectionAttr(UNCACHED_VMM_BASE, TRUE);
    OsKSectionNewAttrEnable();
}
#endif
