1 /*
2 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */

#include "los_vm_phys.h"
#include "los_vm_boot.h"
#include "los_vm_common.h"
#include "los_vm_map.h"
#include "los_vm_dump.h"
#include "los_process_pri.h"


#ifdef LOSCFG_KERNEL_VM

#define ONE_PAGE 1

/* Physical memory area array */
STATIC struct VmPhysArea g_physArea[] = {
    {
        .start = SYS_MEM_BASE,
        .size = SYS_MEM_SIZE_DEFAULT,
    },
};

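/* Physical memory segment table, kept sorted by start address (see OsVmPhysSegCreate) */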
struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX];
INT32 g_vmPhysSegNum = 0;

LosVmPhysSeg *OsGVmPhysSegGet(void)
{
    return g_vmPhysSeg;
}

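/* Initialize the segment's LRU page lists; every list starts empty */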
STATIC VOID OsVmPhysLruInit(struct VmPhysSeg *seg)
{
    INT32 i;
    UINT32 intSave;
    LOS_SpinInit(&seg->lruLock);

    LOS_SpinLockSave(&seg->lruLock, &intSave);
    for (i = 0; i < VM_NR_LRU_LISTS; i++) {
        seg->lruSize[i] = 0;
        LOS_ListInit(&seg->lruList[i]);
    }
    LOS_SpinUnlockRestore(&seg->lruLock, intSave);
}

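/*
 * Create a segment descriptor for [start, start + size) and insert it into
 * g_vmPhysSeg, shifting higher-addressed entries right so the array stays
 * sorted by start address. Returns 0 on success, -1 if the table is full.
 */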
STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
{
    struct VmPhysSeg *seg = NULL;

    if (g_vmPhysSegNum >= VM_PHYS_SEG_MAX) {
        return -1;
    }

    seg = &g_vmPhysSeg[g_vmPhysSegNum++];
    for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) {
        *seg = *(seg - 1);
    }
    seg->start = start;
    seg->size = size;

    return 0;
}

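/* Register every area described in g_physArea as a physical memory segment */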
VOID OsVmPhysSegAdd(VOID)
{
    INT32 i, ret;

    LOS_ASSERT(g_vmPhysSegNum < VM_PHYS_SEG_MAX);

    for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
        ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);
        if (ret != 0) {
            VM_ERR("create phys seg failed");
        }
    }
}

VOID OsVmPhysAreaSizeAdjust(size_t size)
{
    /*
     * The first physical memory area holds the kernel image and the kernel heap,
     * so only the first entry needs to be adjusted here.
     */
    g_physArea[0].start += size;
    g_physArea[0].size -= size;
}

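/* Return the total number of physical pages covered by all memory areas */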
UINT32 OsVmPhysPageNumGet(VOID)
{
    UINT32 nPages = 0;
    INT32 i;

    for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
        nPages += g_physArea[i].size >> PAGE_SHIFT;
    }

    return nPages;
}

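/* Initialize the segment's buddy free lists, one per order, all empty */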
STATIC INLINE VOID OsVmPhysFreeListInit(struct VmPhysSeg *seg)
{
    int i;
    UINT32 intSave;
    struct VmFreeList *list = NULL;

    LOS_SpinInit(&seg->freeListLock);

    LOS_SpinLockSave(&seg->freeListLock, &intSave);
    for (i = 0; i < VM_LIST_ORDER_MAX; i++) {
        list = &seg->freeList[i];
        LOS_ListInit(&list->node);
        list->listCnt = 0;
    }
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}

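/*
 * Carve g_vmPageArray into per-segment slices of page descriptors and
 * initialize each segment's free lists and LRU lists.
 */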
VOID OsVmPhysInit(VOID)
{
    struct VmPhysSeg *seg = NULL;
    UINT32 nPages = 0;
    int i;

    for (i = 0; i < g_vmPhysSegNum; i++) {
        seg = &g_vmPhysSeg[i];
        seg->pageBase = &g_vmPageArray[nPages];
        nPages += seg->size >> PAGE_SHIFT;
        OsVmPhysFreeListInit(seg);
        OsVmPhysLruInit(seg);
    }
}

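/*
 * Put a block of 2^order pages, led by page, onto its segment's free list.
 * "Unsafe" means the caller must already hold the segment's freeListLock.
 */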
STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
{
    struct VmPhysSeg *seg = NULL;
    struct VmFreeList *list = NULL;

    if (page->segID >= VM_PHYS_SEG_MAX) {
        LOS_Panic("The page segment id(%d) is invalid\n", page->segID);
    }

    page->order = order;
    seg = &g_vmPhysSeg[page->segID];

    list = &seg->freeList[order];
    LOS_ListTailInsert(&list->node, &page->node);
    list->listCnt++;
}

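/* Detach a block from its free list; VM_LIST_ORDER_MAX marks it as no longer listed */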
STATIC VOID OsVmPhysFreeListDelUnsafe(LosVmPage *page)
{
    struct VmPhysSeg *seg = NULL;
    struct VmFreeList *list = NULL;

    if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) {
        LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order);
    }

    seg = &g_vmPhysSeg[page->segID];
    list = &seg->freeList[page->order];
    list->listCnt--;
    LOS_ListDelete(&page->node);
    page->order = VM_LIST_ORDER_MAX;
}

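/*
 * Buddy split: a block of 2^newOrder pages starting at page is repeatedly
 * halved down to 2^oldOrder; each upper half (the buddy) is returned to the
 * free list of its order.
 */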
STATIC VOID OsVmPhysPagesSpiltUnsafe(LosVmPage *page, UINT8 oldOrder, UINT8 newOrder)
{
    UINT32 order;
    LosVmPage *buddyPage = NULL;

    for (order = newOrder; order > oldOrder;) {
        order--;
        buddyPage = &page[VM_ORDER_TO_PAGES(order)];
        LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);
        OsVmPhysFreeListAddUnsafe(buddyPage, order);
    }
}

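/*
 * Map a physical address to its page descriptor within segment segID;
 * returns NULL if the address falls outside the segment.
 */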
LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
{
    struct VmPhysSeg *seg = NULL;
    paddr_t offset;

    if (segID >= VM_PHYS_SEG_MAX) {
        LOS_Panic("The page segment id(%d) is invalid\n", segID);
    }
    seg = &g_vmPhysSeg[segID];
    if ((pa < seg->start) || (pa >= (seg->start + seg->size))) {
        return NULL;
    }

    offset = pa - seg->start;
    return (seg->pageBase + (offset >> PAGE_SHIFT));
}

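/* Search every segment for the page descriptor of paddr; NULL if unmanaged */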
LosVmPage *OsVmPaddrToPage(paddr_t paddr)
{
    INT32 segID;
    LosVmPage *vmPage = NULL;

    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        vmPage = OsVmPhysToPage(paddr, segID);
        if (vmPage != NULL) {
            return vmPage;
        }
    }
    return NULL;
}

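/* Translate a page descriptor to its kernel virtual address via the linear mapping */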
VOID *OsVmPageToVaddr(LosVmPage *page)
{
    VADDR_T vaddr;
    vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;

    return (VOID *)(UINTPTR)vaddr;
}

LosVmPage *OsVmVaddrToPage(VOID *ptr)
{
    struct VmPhysSeg *seg = NULL;
    PADDR_T pa = LOS_PaddrQuery(ptr);
    UINT32 segID;

    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        seg = &g_vmPhysSeg[segID];
        if ((pa >= seg->start) && (pa < (seg->start + seg->size))) {
            return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT);
        }
    }

    return NULL;
}

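/*
 * Free the tail pages [startPage, endPage) that were over-allocated because
 * the request was rounded up to a whole buddy block.
 */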
STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size_t endPage)
{
    if (startPage >= endPage) {
        return;
    }

    OsVmPhysPagesFreeContiguous(page, endPage - startPage);
}

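/*
 * Serve a request larger than the biggest buddy order: walk the highest-order
 * free list and probe, block by block, whether the following max-order blocks
 * are also free, until a physically contiguous run of nPages is found.
 */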
STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
{
    struct VmFreeList *list = NULL;
    LosVmPage *page = NULL;
    LosVmPage *tmp = NULL;
    PADDR_T paStart;
    PADDR_T paEnd;
    size_t size = nPages << PAGE_SHIFT;

    list = &seg->freeList[VM_LIST_ORDER_MAX - 1];
    LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) {
        paStart = page->physAddr;
        paEnd = paStart + size;
        if (paEnd > (seg->start + seg->size)) {
            continue;
        }

        for (;;) {
            paStart += PAGE_SIZE << (VM_LIST_ORDER_MAX - 1);
            if ((paStart >= paEnd) || (paStart < seg->start) ||
                (paStart >= (seg->start + seg->size))) {
                break;
            }
            tmp = &seg->pageBase[(paStart - seg->start) >> PAGE_SHIFT];
            if (tmp->order != (VM_LIST_ORDER_MAX - 1)) {
                break;
            }
        }
        if (paStart >= paEnd) {
            return page;
        }
    }

    return NULL;
}

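/*
 * Core buddy allocation: find the smallest order with a free block that can
 * hold nPages (falling back to OsVmPhysLargeAlloc for oversized requests),
 * detach the blocks, split any surplus back onto the free lists, and free the
 * extra tail pages introduced by rounding. Caller holds freeListLock.
 */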
STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
{
    struct VmFreeList *list = NULL;
    LosVmPage *page = NULL;
    LosVmPage *tmp = NULL;
    UINT32 order;
    UINT32 newOrder;

    order = OsVmPagesToOrder(nPages);
    if (order < VM_LIST_ORDER_MAX) {
        for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) {
            list = &seg->freeList[newOrder];
            if (LOS_ListEmpty(&list->node)) {
                continue;
            }
            page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node);
            goto DONE;
        }
    } else {
        newOrder = VM_LIST_ORDER_MAX - 1;
        page = OsVmPhysLargeAlloc(seg, nPages);
        if (page != NULL) {
            goto DONE;
        }
    }
    return NULL;
DONE:
    for (tmp = page; tmp < &page[nPages]; tmp = &tmp[1 << newOrder]) {
        OsVmPhysFreeListDelUnsafe(tmp);
    }
    OsVmPhysPagesSpiltUnsafe(page, order, newOrder);
    OsVmRecycleExtraPages(&page[nPages], nPages, ROUNDUP(nPages, (1 << min(order, newOrder))));

    return page;
}

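/*
 * Buddy free with merging: XOR the physical address with the block size to
 * locate the buddy; while the buddy is free and of the same order, detach it,
 * raise the order, and realign the address, then put the merged block on the
 * free list. Callers hold the segment's freeListLock.
 */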
VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
{
    paddr_t pa;
    LosVmPage *buddyPage = NULL;

    if ((page == NULL) || (order >= VM_LIST_ORDER_MAX)) {
        return;
    }

    if (order < VM_LIST_ORDER_MAX - 1) {
        pa = VM_PAGE_TO_PHYS(page);
        do {
            pa ^= VM_ORDER_TO_PHYS(order);
            buddyPage = OsVmPhysToPage(pa, page->segID);
            if ((buddyPage == NULL) || (buddyPage->order != order)) {
                break;
            }
            OsVmPhysFreeListDelUnsafe(buddyPage);
            order++;
            pa &= ~(VM_ORDER_TO_PHYS(order) - 1);
            page = OsVmPhysToPage(pa, page->segID);
        } while (order < VM_LIST_ORDER_MAX - 1);
    }

    OsVmPhysFreeListAddUnsafe(page, order);
}

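/*
 * Free a physically contiguous run of nPages by decomposing it into buddy
 * blocks: first peel off blocks whose size is dictated by the start address's
 * alignment, then free the remainder in the largest power-of-two chunks that
 * still fit.
 */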
VOID OsVmPhysPagesFreeContiguous(LosVmPage *page, size_t nPages)
{
    paddr_t pa;
    UINT32 order;
    size_t n;

    while (TRUE) {
        pa = VM_PAGE_TO_PHYS(page);
        order = VM_PHYS_TO_ORDER(pa);
        n = VM_ORDER_TO_PAGES(order);
        if (n > nPages) {
            break;
        }
        OsVmPhysPagesFree(page, order);
        nPages -= n;
        page += n;
    }

    while (nPages > 0) {
        order = LOS_HighBitGet(nPages);
        n = VM_ORDER_TO_PAGES(order);
        OsVmPhysPagesFree(page, order);
        nPages -= n;
        page += n;
    }
}

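/*
 * Try each segment in turn, under its free-list lock, until one can satisfy
 * the allocation; the first page of the run records refCounts and nPages.
 */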
STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
{
    UINT32 intSave;
    struct VmPhysSeg *seg = NULL;
    LosVmPage *page = NULL;
    UINT32 segID;

    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        seg = &g_vmPhysSeg[segID];
        LOS_SpinLockSave(&seg->freeListLock, &intSave);
        page = OsVmPhysPagesAlloc(seg, nPages);
        if (page != NULL) {
            /* The first page of the contiguous run holds the refCounts */
            LOS_AtomicSet(&page->refCounts, 0);
            page->nPages = nPages;
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
            return page;
        }
        LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
    }
    return NULL;
}

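/*
 * Allocate nPages physically contiguous pages and return their kernel virtual
 * address, or NULL on failure. An illustrative usage sketch:
 *
 *     VOID *buf = LOS_PhysPagesAllocContiguous(4); // request 4 contiguous pages
 *     if (buf != NULL) {
 *         // ... use the buffer ...
 *         LOS_PhysPagesFreeContiguous(buf, 4);     // release all 4 pages
 *     }
 */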
VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
{
    LosVmPage *page = NULL;

    if (nPages == 0) {
        return NULL;
    }

    page = OsVmPhysPagesGet(nPages);
    if (page == NULL) {
        return NULL;
    }

    return OsVmPageToVaddr(page);
}

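/* Free nPages contiguous pages previously obtained from LOS_PhysPagesAllocContiguous */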
VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
{
    UINT32 intSave;
    struct VmPhysSeg *seg = NULL;
    LosVmPage *page = NULL;

    if (ptr == NULL) {
        return;
    }

    page = OsVmVaddrToPage(ptr);
    if (page == NULL) {
        VM_ERR("vm page of ptr(%#x) is null", ptr);
        return;
    }
    page->nPages = 0;

    seg = &g_vmPhysSeg[page->segID];
    LOS_SpinLockSave(&seg->freeListLock, &intSave);

    OsVmPhysPagesFreeContiguous(page, nPages);

    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
#ifdef LOSCFG_KERNEL_PLIMITS
    OsMemLimitMemFree(nPages * PAGE_SIZE);
#endif
}

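/* Reverse of the linear mapping: kernel virtual address back to physical address */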
PADDR_T OsKVaddrToPaddr(VADDR_T kvaddr)
{
    if (kvaddr == 0) {
        return 0;
    }
    return (kvaddr - KERNEL_ASPACE_BASE + SYS_MEM_BASE);
}

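/*
 * Translate a physical address to its kernel virtual address. Note that the
 * same linear translation is returned even when paddr lies outside every
 * registered segment, so the segment walk does not reject unmanaged addresses.
 */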
VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
{
    struct VmPhysSeg *seg = NULL;
    UINT32 segID;

    if (paddr == 0) {
        return NULL;
    }

    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        seg = &g_vmPhysSeg[segID];
        if ((paddr >= seg->start) && (paddr < (seg->start + seg->size))) {
            return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
        }
    }

    return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
}

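/* Drop one reference to a page; it returns to the buddy system when the count hits zero */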
VOID LOS_PhysPageFree(LosVmPage *page)
{
    UINT32 intSave;
    struct VmPhysSeg *seg = NULL;

    if (page == NULL) {
        return;
    }

    if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
        seg = &g_vmPhysSeg[page->segID];
        LOS_SpinLockSave(&seg->freeListLock, &intSave);

        OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
        LOS_AtomicSet(&page->refCounts, 0);

        LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
    }
#ifdef LOSCFG_KERNEL_PLIMITS
    OsMemLimitMemFree(PAGE_SIZE);
#endif
}

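/* Allocate a single physical page; returns its page descriptor or NULL */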
LosVmPage *LOS_PhysPageAlloc(VOID)
{
    return OsVmPhysPagesGet(ONE_PAGE);
}

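/*
 * Allocate nPages individual (not necessarily contiguous) pages and queue
 * them on list; returns the number of pages actually allocated.
 */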
size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
{
    LosVmPage *page = NULL;
    size_t count = 0;

    if ((list == NULL) || (nPages == 0)) {
        return 0;
    }

    while (nPages--) {
        page = OsVmPhysPagesGet(ONE_PAGE);
        if (page == NULL) {
            break;
        }
        LOS_ListTailInsert(list, &page->node);
        count++;
    }

    return count;
}

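/*
 * Copy-on-write support: if the old page has a single reference it is simply
 * shared (*newPaddr is set to oldPaddr); otherwise its contents are copied
 * into the new page and the reference counts are moved accordingly.
 */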
VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage)
{
    UINT32 intSave;
    LosVmPage *oldPage = NULL;
    VOID *newMem = NULL;
    VOID *oldMem = NULL;
    LosVmPhysSeg *seg = NULL;

    if ((newPage == NULL) || (newPaddr == NULL)) {
        VM_ERR("new page invalid");
        return;
    }

    oldPage = LOS_VmPageGet(oldPaddr);
    if (oldPage == NULL) {
        VM_ERR("invalid oldPaddr %p", oldPaddr);
        return;
    }

    seg = &g_vmPhysSeg[oldPage->segID];
    LOS_SpinLockSave(&seg->freeListLock, &intSave);
    if (LOS_AtomicRead(&oldPage->refCounts) == 1) {
        *newPaddr = oldPaddr;
    } else {
        newMem = LOS_PaddrToKVaddr(*newPaddr);
        oldMem = LOS_PaddrToKVaddr(oldPaddr);
        if ((newMem == NULL) || (oldMem == NULL)) {
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
            return;
        }
        if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {
            VM_ERR("memcpy_s failed");
        }

        LOS_AtomicInc(&newPage->refCounts);
        LOS_AtomicDec(&oldPage->refCounts);
    }
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}

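/* Return the segment a page belongs to, or NULL for an invalid page */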
struct VmPhysSeg *OsVmPhysSegGet(LosVmPage *page)
{
    if ((page == NULL) || (page->segID >= VM_PHYS_SEG_MAX)) {
        return NULL;
    }

    return (OsGVmPhysSegGet() + page->segID);
}

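/* Return the smallest buddy order whose block size covers nPages */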
UINT32 OsVmPagesToOrder(size_t nPages)
{
    UINT32 order;

    for (order = 0; VM_ORDER_TO_PAGES(order) < nPages; order++);

    return order;
}

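/*
 * Release every page queued on list (as filled by LOS_PhysPagesAlloc),
 * freeing each page whose reference count drops to zero; returns the number
 * of list entries processed.
 */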
size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
{
    UINT32 intSave;
    LosVmPage *page = NULL;
    LosVmPage *nPage = NULL;
    LosVmPhysSeg *seg = NULL;
    size_t count = 0;

    if (list == NULL) {
        return 0;
    }

    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {
        LOS_ListDelete(&page->node);
        if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
            seg = &g_vmPhysSeg[page->segID];
            LOS_SpinLockSave(&seg->freeListLock, &intSave);
            OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
            LOS_AtomicSet(&page->refCounts, 0);
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
        }
        count++;
    }

    return count;
}
#else
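/* Without LOSCFG_KERNEL_VM, only the fixed DMA-to-VMM translation is available */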
VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
{
    if ((paddr < DDR_MEM_ADDR) || (paddr >= ((PADDR_T)DDR_MEM_ADDR + DDR_MEM_SIZE))) {
        return NULL;
    }

    return (VADDR_T *)DMA_TO_VMM_ADDR(paddr);
}
#endif