1 /*
2  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without modification,
6  * are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice, this list of
9  *    conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12  *    of conditions and the following disclaimer in the documentation and/or other materials
13  *    provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16  *    to endorse or promote products derived from this software without specific prior written
17  *    permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "los_memory.h"
33 #include "los_memory_pri.h"
34 #include "sys/param.h"
35 #include "los_spinlock.h"
36 #include "los_vm_phys.h"
37 #include "los_vm_boot.h"
38 #include "los_vm_filemap.h"
39 #include "los_task_pri.h"
40 #include "los_hook.h"
41 
42 #ifdef LOSCFG_KERNEL_LMS
43 #include "los_lms_pri.h"
44 #endif
45 
46 /* Used to cut non-essential functions. */
47 #define OS_MEM_FREE_BY_TASKID   0
48 #ifdef LOSCFG_KERNEL_VM
49 #define OS_MEM_EXPAND_ENABLE    1
50 #else
51 #define OS_MEM_EXPAND_ENABLE    0
52 #endif
53 
54 /* The dump size of the current broken node when a memcheck error occurs */
55 #define OS_MEM_NODE_DUMP_SIZE   64
56 /* Number of columns in the output info of a mem node */
57 #define OS_MEM_COLUMN_NUM       8
58 
59 UINT8 *m_aucSysMem0 = NULL;
60 UINT8 *m_aucSysMem1 = NULL;
61 
62 #ifdef LOSCFG_MEM_MUL_POOL
63 VOID *g_poolHead = NULL;
64 #endif
65 
66 /* The following are the macro definitions and interfaces related to TLSF (Two-Level Segregated Fit). */
67 
68 /* The Second Level Index (SLI) is assumed to be 3. */
69 #define OS_MEM_SLI                      3
70 /* One free list for each small bucket size: 4, 8, 12, ..., up to 124. */
71 #define OS_MEM_SMALL_BUCKET_COUNT       31
72 #define OS_MEM_SMALL_BUCKET_MAX_SIZE    128
73 /* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
74 #define OS_MEM_LARGE_BUCKET_COUNT       24
75 #define OS_MEM_FREE_LIST_NUM            (1 << OS_MEM_SLI)
76 /* log2(OS_MEM_SMALL_BUCKET_MAX_SIZE) is 7, so the large buckets start at first-level index 7. */
77 #define OS_MEM_LARGE_START_BUCKET       7
78 
79 /* The total count of free lists. */
80 #define OS_MEM_FREE_LIST_COUNT  (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
81 /* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty. */
82 #define OS_MEM_BITMAP_WORDS     ((OS_MEM_FREE_LIST_COUNT >> 5) + 1)
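/*
 * Worked example of the sizing above: OS_MEM_FREE_LIST_COUNT = 31 + (24 << 3) = 223
 * free lists in total, so OS_MEM_BITMAP_WORDS = (223 >> 5) + 1 = 7 words of 32 bits each.
 */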
83 
84 #define OS_MEM_BITMAP_MASK 0x1FU
85 
86 /* Find the index of the lowest set bit (first 1) in the bitmap. */
87 STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
88 {
89     bitmap &= ~bitmap + 1;
90     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
91 }
92 
93 /* Find the index of the highest set bit (last 1) in the bitmap. */
94 STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
95 {
96     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
97 }
98 
99 STATIC INLINE UINT32 OsMemLog2(UINT32 size)
100 {
101     return OsMemFLS(size);
102 }
103 
104 /* Get the first level: f = log2(size). */
105 STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
106 {
107     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
108         return ((size >> 2) - 1); /* 2: The small bucket setup is 4. */
109     }
110     return OsMemLog2(size);
111 }
112 
113 /* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
114 STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
115 {
116     return (((size << OS_MEM_SLI) >> fl) - OS_MEM_FREE_LIST_NUM);
117 }
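/*
 * Illustration of the two-level mapping: for size = 200, fl = log2(200) = 7 and
 * sl = ((200 << 3) >> 7) - 8 = 4, i.e. the block is filed in large bucket 7, sub-list 4;
 * for size = 40 (< 128), the small-bucket path gives index (40 >> 2) - 1 = 9 directly.
 */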
118 
119 /* The following are the macro definitions and interfaces related to the memory allocation algorithm. */
120 
121 struct OsMemNodeHead {
122     UINT32 magic;
123     union {
124         struct OsMemNodeHead *prev; /* prev: points from the current node back to the previous node */
125         struct OsMemNodeHead *next; /* next: used by the last (sentinel) node to point to the expanded region */
126     } ptr;
127 #ifdef LOSCFG_MEM_LEAKCHECK
128     UINTPTR linkReg[LOS_RECORD_LR_CNT];
129 #endif
130     UINT32 sizeAndFlag;
131 };
132 
133 struct OsMemUsedNodeHead {
134     struct OsMemNodeHead header;
135 #if OS_MEM_FREE_BY_TASKID
136     UINT32 taskID;
137 #endif
138 };
139 
140 struct OsMemFreeNodeHead {
141     struct OsMemNodeHead header;
142     struct OsMemFreeNodeHead *prev;
143     struct OsMemFreeNodeHead *next;
144 };
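/*
 * Note: a free node reuses the first bytes of the user data area for its prev/next
 * free-list pointers, which is why OS_MEM_MIN_ALLOC_SIZE below is defined as
 * sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead).
 */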
145 
146 struct OsMemPoolInfo {
147     VOID *pool;
148     UINT32 totalSize;
149     UINT32 attr;
150 #ifdef LOSCFG_MEM_WATERLINE
151     UINT32 waterLine;   /* Maximum usage size in a memory pool */
152     UINT32 curUsedSize; /* Current usage size in a memory pool */
153 #endif
154 };
155 
156 struct OsMemPoolHead {
157     struct OsMemPoolInfo info;
158     UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];
159     struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];
160     SPIN_LOCK_S spinlock;
161 #ifdef LOSCFG_MEM_MUL_POOL
162     VOID *nextPool;
163 #endif
164 };
165 
166 /* Spinlock for the mem module, only effective in SMP mode */
167 #define MEM_LOCK(pool, state)       LOS_SpinLockSave(&(pool)->spinlock, &(state))
168 #define MEM_UNLOCK(pool, state)     LOS_SpinUnlockRestore(&(pool)->spinlock, (state))
169 
170 /* The memory pool supports expansion. */
171 #define OS_MEM_POOL_EXPAND_ENABLE  0x01
172 /* The memory pool's lock is enabled. */
173 #define OS_MEM_POOL_LOCK_ENABLE    0x02
174 
175 #define OS_MEM_NODE_MAGIC        0xABCDDCBA
176 #define OS_MEM_MIN_ALLOC_SIZE    (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead))
177 
178 #define OS_MEM_NODE_USED_FLAG      0x80000000U
179 #define OS_MEM_NODE_ALIGNED_FLAG   0x40000000U
180 #define OS_MEM_NODE_LAST_FLAG      0x20000000U  /* Sentinel Node */
181 #define OS_MEM_NODE_ALIGNED_AND_USED_FLAG (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LAST_FLAG)
182 
183 #define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
184             ((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
185 #define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
186             ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG))
187 #define OS_MEM_NODE_GET_ALIGNED_GAPSIZE(sizeAndFlag) \
188             ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_FLAG)
189 #define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
190             ((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
191 #define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
192             ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG))
193 #define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
194             ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
195 #define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
196                         ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG))
197 #define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
198             ((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)
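/*
 * sizeAndFlag encoding: bit 31 = used, bit 30 = aligned gap, bit 29 = last (sentinel);
 * the remaining low bits hold the node size, which is why OS_MEM_NODE_GET_SIZE masks
 * off all three flag bits.
 */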
199 
200 #define OS_MEM_ALIGN_SIZE           sizeof(UINTPTR)
201 #define OS_MEM_IS_POW_TWO(value)    ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
202 #define OS_MEM_ALIGN(p, alignSize)  (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
203 #define OS_MEM_IS_ALIGNED(a, b)     (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
204 #define OS_MEM_NODE_HEAD_SIZE       sizeof(struct OsMemUsedNodeHead)
205 #define OS_MEM_MIN_POOL_SIZE        (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
206 #define OS_MEM_NEXT_NODE(node) \
207     ((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
208 #define OS_MEM_FIRST_NODE(pool) \
209     (struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
210 #define OS_MEM_END_NODE(pool, size) \
211     (struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
212 #define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
213     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
214 #define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
215     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
216 #define OS_MEM_SET_MAGIC(node)      ((node)->magic = OS_MEM_NODE_MAGIC)
217 #define OS_MEM_MAGIC_VALID(node)    ((node)->magic == OS_MEM_NODE_MAGIC)
218 
219 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
220 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
221 STATIC VOID OsMemInfoPrint(VOID *pool);
222 #ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
223 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
224 #endif
225 
226 #if OS_MEM_FREE_BY_TASKID
227 STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
228 {
229     node->taskID = LOS_CurTaskIDGet();
230 }
231 #endif
232 
233 #ifdef LOSCFG_MEM_WATERLINE
234 STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
235 {
236     pool->info.curUsedSize += size;
237     if (pool->info.curUsedSize > pool->info.waterLine) {
238         pool->info.waterLine = pool->info.curUsedSize;
239     }
240 }
241 #else
242 STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
243 {
244     (VOID)pool;
245     (VOID)size;
246 }
247 #endif
248 
249 #if OS_MEM_EXPAND_ENABLE
250 STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
251 {
252     struct OsMemNodeHead *node = NULL;
253     VOID *ptr = sentinelNode->ptr.next;
254     UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
255 
256     while ((ptr != NULL) && (size != 0)) {
257         node = OS_MEM_END_NODE(ptr, size);
258         ptr = node->ptr.next;
259         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
260     }
261 
262     return node;
263 }
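/*
 * Expanded pool regions form a chain of sentinel nodes: each sentinel stores the next
 * region's address in ptr.next and that region's size in sizeAndFlag; the chain ends at
 * a sentinel with next == NULL and size == 0, which is what the walk above looks for.
 */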
264 
265 STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
266 {
267     if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
268         return FALSE;
269     }
270 
271     if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
272         return FALSE;
273     }
274 
275     return TRUE;
276 }
277 
278 STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
279 {
280     if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
281         PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
282         return TRUE;
283     }
284 
285     if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
286         (sentinelNode->ptr.next == NULL)) {
287         return TRUE;
288     }
289 
290     return FALSE;
291 }
292 
293 STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
294 {
295     if (sentinelNode->ptr.next != NULL) {
296         sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
297     }
298 
299     sentinelNode->sizeAndFlag = size;
300     sentinelNode->ptr.next = newNode;
301     OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
302     OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
303 }
304 
305 STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
306 {
307     return node->ptr.next;
308 }
309 
310 STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
311 {
312     UINT32 nextSize;
313     struct OsMemNodeHead *nextNode = NULL;
314     struct OsMemNodeHead *sentinelNode = NULL;
315 
316     sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
317     while (sentinelNode != NULL) {
318         if (OsMemIsLastSentinelNode(sentinelNode)) {
319             PRINT_ERR("PreSentinelNodeGet can not find node %#x\n", node);
320             return NULL;
321         }
322         nextNode = OsMemSentinelNodeGet(sentinelNode);
323         if (nextNode == node) {
324             return sentinelNode;
325         }
326         nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
327         sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
328     }
329 
330     return NULL;
331 }
332 
333 UINT32 OsMemLargeNodeFree(const VOID *ptr)
334 {
335     LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);
336     if ((page == NULL) || (page->nPages == 0)) {
337         return LOS_NOK;
338     }
339     LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages);
340 
341     return LOS_OK;
342 }
343 
344 STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
345 {
346     struct OsMemNodeHead *mySentinel = NULL;
347     struct OsMemNodeHead *preSentinel = NULL;
348     size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
349     size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
350 
351     if (nodeSize != totalSize) {
352         return FALSE;
353     }
354 
355     preSentinel = PreSentinelNodeGet(pool, node);
356     if (preSentinel == NULL) {
357         return FALSE;
358     }
359 
360     mySentinel = node->ptr.prev;
361     if (OsMemIsLastSentinelNode(mySentinel)) { /* the previous sentinel becomes the last sentinel */
362         preSentinel->ptr.next = NULL;
363         OsMemSentinelNodeSet(preSentinel, NULL, 0);
364     } else {
365         preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
366         preSentinel->ptr.next = mySentinel->ptr.next;
367     }
368 
369     if (OsMemLargeNodeFree(node) != LOS_OK) {
370         PRINT_ERR("TryShrinkPool free %#x failed!\n", node);
371         return FALSE;
372     }
373 #ifdef LOSCFG_KERNEL_LMS
374     LOS_LmsCheckPoolDel(node);
375 #endif
376     return TRUE;
377 }
378 
379 STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
380 {
381     UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
382     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
383     struct OsMemNodeHead *newNode = NULL;
384     struct OsMemNodeHead *endNode = NULL;
385 
386     size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
387     endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
388 
389 RETRY:
390     newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
391     if (newNode == NULL) {
392         if (tryCount > 0) {
393             tryCount--;
394             MEM_UNLOCK(poolInfo, intSave);
395             OsTryShrinkMemory(size >> PAGE_SHIFT);
396             MEM_LOCK(poolInfo, intSave);
397             goto RETRY;
398         }
399 
400         PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
401         return -1;
402     }
403 #ifdef LOSCFG_KERNEL_LMS
404     UINT32 resize = 0;
405     if (g_lms != NULL) {
406         /*
407          * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
408          * resize != 0, shadow memory init successful, set poolSize as resize.
409          */
410         resize = g_lms->init(newNode, size);
411         size = (resize == 0) ? size : resize;
412     }
413 #endif
414     newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
415     newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
416     OsMemSentinelNodeSet(endNode, newNode, size);
417     OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
418 
419     endNode = OS_MEM_END_NODE(newNode, size);
420     (VOID)memset(endNode, 0, sizeof(*endNode));
421     endNode->ptr.next = NULL;
422     endNode->magic = OS_MEM_NODE_MAGIC;
423     OsMemSentinelNodeSet(endNode, NULL, 0);
424     OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);
425 
426     return 0;
427 }
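/*
 * Summary of the expansion above: the freshly allocated pages become one free node
 * (newNode) plus a new sentinel at their end; the old sentinel (endNode) is re-pointed
 * at the new region, so the region stays reachable from the pool head via the sentinel chain.
 */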
428 
429 STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave)
430 {
431     UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool));
432     UINT32 expandSize = MAX(expandDefault, allocSize);
433     UINT32 tryCount = 1;
434     UINT32 ret;
435 
436     do {
437         ret = OsMemPoolExpandSub(pool, expandSize, intSave);
438         if (ret == 0) {
439             return 0;
440         }
441 
442         if (allocSize > expandDefault) {
443             break;
444         }
445         expandSize = allocSize;
446     } while (tryCount--);
447 
448     return -1;
449 }
450 
451 VOID LOS_MemExpandEnable(VOID *pool)
452 {
453     if (pool == NULL) {
454         return;
455     }
456 
457     ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
458 }
459 #endif
460 
461 #ifdef LOSCFG_KERNEL_LMS
462 STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
463 {
464     if (g_lms == NULL) {
465         return;
466     }
467 
468     g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
469     g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
470     g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
471         LMS_SHADOW_REDZONE_U8);
472     g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
473         LMS_SHADOW_AFTERFREE_U8);
474 }
475 
476 STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
477 {
478     struct OsMemNodeHead *allocNode = NULL;
479 
480     if ((g_lms == NULL) || (ptr == NULL)) {
481         return;
482     }
483     allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
484     if (ptr != alignedPtr) {
485         g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
486         g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
487     }
488 
489     /* mark remaining as redzone */
490     g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
491         LMS_SHADOW_REDZONE_U8);
492 }
493 
494 STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
495 {
496     if (g_lms == NULL) {
497         return;
498     }
499 
500     g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
501         LMS_SHADOW_ACCESSIBLE_U8);
502 }
503 
504 STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
505 {
506     if (g_lms == NULL) {
507         return;
508     }
509     /* mark next node */
510     g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
511         (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
512     g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
513         (UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
514 }
515 
516 STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
517 {
518     if (g_lms == NULL) {
519         return;
520     }
521     /* mark remaining as redzone */
522     g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
523 }
524 #endif
525 
526 #ifdef LOSCFG_MEM_LEAKCHECK
527 STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
528 {
529     LOS_RecordLR(node->linkReg, LOS_RECORD_LR_CNT, LOS_RECORD_LR_CNT, LOS_OMIT_LR_CNT);
530 }
531 
532 STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
533 {
534     UINT32 count;
535 
536     if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
537 #ifdef __LP64__
538         PRINTK("0x%018x: ", node);
539 #else
540         PRINTK("0x%010x: ", node);
541 #endif
542         for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
543 #ifdef __LP64__
544             PRINTK(" 0x%018x ", node->linkReg[count]);
545 #else
546             PRINTK(" 0x%010x ", node->linkReg[count]);
547 #endif
548         }
549         PRINTK("\n");
550     }
551 }
552 
553 VOID OsMemUsedNodeShow(VOID *pool)
554 {
555     if (pool == NULL) {
556         PRINTK("input param is NULL\n");
557         return;
558     }
559     if (LOS_MemIntegrityCheck(pool)) {
560         PRINTK("LOS_MemIntegrityCheck error\n");
561         return;
562     }
563     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
564     struct OsMemNodeHead *tmpNode = NULL;
565     struct OsMemNodeHead *endNode = NULL;
566     UINT32 size;
567     UINT32 intSave;
568     UINT32 count;
569 
570 #ifdef __LP64__
571     PRINTK("\n\rnode                ");
572 #else
573     PRINTK("\n\rnode        ");
574 #endif
575     for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
576 #ifdef __LP64__
577         PRINTK("        LR[%u]       ", count);
578 #else
579         PRINTK("    LR[%u]   ", count);
580 #endif
581     }
582     PRINTK("\n");
583 
584     MEM_LOCK(poolInfo, intSave);
585     endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
586 #if OS_MEM_EXPAND_ENABLE
587     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;
588          tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
589         if (tmpNode == endNode) {
590             if (OsMemIsLastSentinelNode(endNode) == FALSE) {
591                 size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
592                 tmpNode = OsMemSentinelNodeGet(endNode);
593                 endNode = OS_MEM_END_NODE(tmpNode, size);
594                 continue;
595             } else {
596                 break;
597             }
598         } else {
599             OsMemUsedNodePrint(tmpNode);
600         }
601     }
602 #else
603     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
604          tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
605         OsMemUsedNodePrint(tmpNode);
606     }
607 #endif
608     MEM_UNLOCK(poolInfo, intSave);
609 }
610 
611 STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
612                                    const struct OsMemNodeHead *preNode)
613 {
614     int i;
615     PRINTK("\n broken node head LR info: \n");
616     for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
617         PRINTK(" LR[%d]:%#x\n", i, tmpNode->linkReg[i]);
618     }
619 
620     PRINTK("\n pre node head LR info: \n");
621     for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
622         PRINTK(" LR[%d]:%#x\n", i, preNode->linkReg[i]);
623     }
624 }
625 #endif
626 
627 STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
628 {
629     UINT32 fl = OsMemFlGet(size);
630     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
631         return fl;
632     }
633 
634     UINT32 sl = OsMemSlGet(size, fl);
635     return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl);
636 }
637 
638 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
639                                                                   UINT32 index, UINT32 size)
640 {
641     struct OsMemFreeNodeHead *node = NULL;
642 
643     for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
644         if (node->header.sizeAndFlag >= size) {
645             return node;
646         }
647     }
648 
649     return NULL;
650 }
651 
652 #define BITMAP_INDEX(index) ((index) >> 5)
653 STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
654 {
655     UINT32 mask;
656 
657     mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
658     mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
659     if (mask != 0) {
660         index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
661         return index;
662     }
663 
664     return OS_MEM_FREE_LIST_COUNT;
665 }
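/*
 * Each bitmap word covers 32 free lists; the mask above clears the bits below the
 * starting index so only lists at or after it are considered, and
 * OS_MEM_FREE_LIST_COUNT is returned as a "nothing found in this word" marker.
 */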
666 
667 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
668 {
669     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
670     UINT32 fl = OsMemFlGet(size);
671     UINT32 sl;
672     UINT32 index, tmp;
673     UINT32 curIndex = OS_MEM_FREE_LIST_COUNT;
674     UINT32 mask;
675 
676     do {
677         if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
678             index = fl;
679         } else {
680             sl = OsMemSlGet(size, fl);
681             curIndex = ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
682             index = curIndex + 1;
683         }
684 
685         tmp = OsMemNotEmptyIndexGet(poolHead, index);
686         if (tmp != OS_MEM_FREE_LIST_COUNT) {
687             index = tmp;
688             goto DONE;
689         }
690 
691         for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) { /* 32: align size */
692             mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
693             if (mask != 0) {
694                 index = OsMemFFS(mask) + index;
695                 goto DONE;
696             }
697         }
698     } while (0);
699 
700     if (curIndex == OS_MEM_FREE_LIST_COUNT) {
701         return NULL;
702     }
703 
704     *outIndex = curIndex;
705     return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
706 DONE:
707     *outIndex = index;
708     return poolHead->freeList[index];
709 }
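/*
 * Search order above: for large sizes the scan starts at curIndex + 1, whose blocks are
 * guaranteed to be big enough, then walks the later bitmap words; only if no such list
 * is non-empty does it fall back to a first-fit walk of the exact list (curIndex).
 * Small sizes map to exact-size lists, so their own index is searched directly.
 */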
710 
711 STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
712 {
713     head->freeListBitmap[BITMAP_INDEX(index)] |= 1U << (index & 0x1f);
714 }
715 
716 STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
717 {
718     head->freeListBitmap[BITMAP_INDEX(index)] &= ~(1U << (index & 0x1f));
719 }
720 
721 STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
722 {
723     struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
724     if (firstNode != NULL) {
725         firstNode->prev = node;
726     }
727     node->prev = NULL;
728     node->next = firstNode;
729     pool->freeList[listIndex] = node;
730     OsMemSetFreeListBit(pool, listIndex);
731     node->header.magic = OS_MEM_NODE_MAGIC;
732 }
733 
734 STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
735 {
736     if (node == pool->freeList[listIndex]) {
737         pool->freeList[listIndex] = node->next;
738         if (node->next == NULL) {
739             OsMemClearFreeListBit(pool, listIndex);
740         } else {
741             node->next->prev = NULL;
742         }
743     } else {
744         node->prev->next = node->next;
745         if (node->next != NULL) {
746             node->next->prev = node->prev;
747         }
748     }
749     node->header.magic = OS_MEM_NODE_MAGIC;
750 }
751 
752 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
753 {
754     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
755     if (index >= OS_MEM_FREE_LIST_COUNT) {
756         LOS_Panic("The index of free lists is error, index = %u\n", index);
757         return;
758     }
759     OsMemListAdd(pool, index, node);
760 }
761 
762 STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
763 {
764     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
765     if (index >= OS_MEM_FREE_LIST_COUNT) {
766         LOS_Panic("The index of free lists is error, index = %u\n", index);
767         return;
768     }
769     OsMemListDelete(pool, index, node);
770 }
771 
772 STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
773 {
774     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
775     UINT32 index;
776     struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
777     if (firstNode == NULL) {
778         return NULL;
779     }
780 
781     OsMemListDelete(poolHead, index, firstNode);
782 
783     return &firstNode->header;
784 }
785 
786 STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
787 {
788     struct OsMemNodeHead *nextNode = NULL;
789 
790     node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
791     nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
792     if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
793         nextNode->ptr.prev = node->ptr.prev;
794     }
795 }
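/*
 * Merge direction: the node's size is absorbed into its previous node, and the back
 * pointer of the following node is re-linked to that previous node unless the
 * following node is a sentinel (last) node.
 */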
796 
797 STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
798 {
799     struct OsMemFreeNodeHead *newFreeNode = NULL;
800     struct OsMemNodeHead *nextNode = NULL;
801 
802     newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
803     newFreeNode->header.ptr.prev = allocNode;
804     newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
805     allocNode->sizeAndFlag = allocSize;
806     nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
807     if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
808         nextNode->ptr.prev = &newFreeNode->header;
809         if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
810             OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
811             OsMemMergeNode(nextNode);
812         }
813     }
814 
815     OsMemFreeNodeAdd(pool, newFreeNode);
816 }
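/*
 * After the split, the remainder becomes a new free node; if the node right after the
 * remainder is itself free, it is first removed from its list and merged into the
 * remainder, so two adjacent free nodes never coexist.
 */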
817 
818 STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
819 {
820     struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;
821 
822 #if OS_MEM_FREE_BY_TASKID
823     OsMemNodeSetTaskID(node);
824 #endif
825 
826 #ifdef LOSCFG_KERNEL_LMS
827     struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
828     if (g_lms != NULL) {
829         g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
830     }
831 #endif
832     return node + 1;
833 }
834 
835 STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
836 {
837     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
838     struct OsMemNodeHead *newNode = NULL;
839     struct OsMemNodeHead *endNode = NULL;
840 #ifdef LOSCFG_KERNEL_LMS
841     UINT32 resize = 0;
842     if (g_lms != NULL) {
843         /*
844          * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
845          * resize != 0, shadow memory init successful, set poolSize as resize.
846          */
847         resize = g_lms->init(pool, size);
848         size = (resize == 0) ? size : resize;
849     }
850 #endif
851     (VOID)memset(poolHead, 0, sizeof(struct OsMemPoolHead));
852 
853     LOS_SpinInit(&poolHead->spinlock);
854     poolHead->info.pool = pool;
855     poolHead->info.totalSize = size;
856     poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. */
857 
858     newNode = OS_MEM_FIRST_NODE(pool);
859     newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
860     newNode->ptr.prev = NULL;
861     newNode->magic = OS_MEM_NODE_MAGIC;
862     OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
863 
864     /* The last mem node */
865     endNode = OS_MEM_END_NODE(pool, size);
866     endNode->magic = OS_MEM_NODE_MAGIC;
867 #if OS_MEM_EXPAND_ENABLE
868     endNode->ptr.next = NULL;
869     OsMemSentinelNodeSet(endNode, NULL, 0);
870 #else
871     endNode->sizeAndFlag = 0;
872     endNode->ptr.prev = newNode;
873     OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
874 #endif
875 #ifdef LOSCFG_MEM_WATERLINE
876     poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
877     poolHead->info.waterLine = poolHead->info.curUsedSize;
878 #endif
879 #ifdef LOSCFG_KERNEL_LMS
880     if (resize != 0) {
881         OsLmsFirstNodeMark(pool, newNode);
882     }
883 #endif
884     return LOS_OK;
885 }
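/*
 * Resulting pool layout: [struct OsMemPoolHead][first free node spanning the rest of
 * the pool][end node header]. The end node acts as the sentinel when expansion is
 * enabled; otherwise it is simply marked used with size 0.
 */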
886 
887 #ifdef LOSCFG_MEM_MUL_POOL
888 STATIC VOID OsMemPoolDeinit(VOID *pool)
889 {
890     (VOID)memset(pool, 0, sizeof(struct OsMemPoolHead));
891 }
892 
893 STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
894 {
895     VOID *nextPool = g_poolHead;
896     VOID *curPool = g_poolHead;
897     UINTPTR poolEnd;
898     while (nextPool != NULL) {
899         poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
900         if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
901             (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
902             PRINT_ERR("pool [%#x, %#x) conflict with pool [%#x, %#x)\n",
903                       pool, (UINTPTR)pool + size,
904                       nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
905             return LOS_NOK;
906         }
907         curPool = nextPool;
908         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
909     }
910 
911     if (g_poolHead == NULL) {
912         g_poolHead = pool;
913     } else {
914         ((struct OsMemPoolHead *)curPool)->nextPool = pool;
915     }
916 
917     ((struct OsMemPoolHead *)pool)->nextPool = NULL;
918     return LOS_OK;
919 }
920 
921 STATIC UINT32 OsMemPoolDelete(const VOID *pool)
922 {
923     UINT32 ret = LOS_NOK;
924     VOID *nextPool = NULL;
925     VOID *curPool = NULL;
926 
927     do {
928         if (pool == g_poolHead) {
929             g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
930             ret = LOS_OK;
931             break;
932         }
933 
934         curPool = g_poolHead;
935         nextPool = g_poolHead;
936         while (nextPool != NULL) {
937             if (pool == nextPool) {
938                 ((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
939                 ret = LOS_OK;
940                 break;
941             }
942             curPool = nextPool;
943             nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
944         }
945     } while (0);
946 
947     return ret;
948 }
949 #endif
950 
951 UINT32 LOS_MemInit(VOID *pool, UINT32 size)
952 {
953     if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
954         return OS_ERROR;
955     }
956 
957     size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);
958     if (OsMemPoolInit(pool, size)) {
959         return OS_ERROR;
960     }
961 
962 #ifdef LOSCFG_MEM_MUL_POOL
963     if (OsMemPoolAdd(pool, size)) {
964         (VOID)OsMemPoolDeinit(pool);
965         return OS_ERROR;
966     }
967 #endif
968 
969     OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);
970     return LOS_OK;
971 }
972 
973 #ifdef LOSCFG_MEM_MUL_POOL
974 UINT32 LOS_MemDeInit(VOID *pool)
975 {
976     if (pool == NULL) {
977         return OS_ERROR;
978     }
979 
980     if (OsMemPoolDelete(pool)) {
981         return OS_ERROR;
982     }
983 
984     OsMemPoolDeinit(pool);
985 
986     OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, pool);
987     return LOS_OK;
988 }
989 
990 UINT32 LOS_MemPoolList(VOID)
991 {
992     VOID *nextPool = g_poolHead;
993     UINT32 index = 0;
994     while (nextPool != NULL) {
995         PRINTK("pool%u :\n", index);
996         index++;
997         OsMemInfoPrint(nextPool);
998         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
999     }
1000     return index;
1001 }
1002 #endif
1003 
1004 STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
1005 {
1006     struct OsMemNodeHead *allocNode = NULL;
1007 
1008 #ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
1009     if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
1010         return NULL;
1011     }
1012 #endif
1013 
1014     UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
1015 #if OS_MEM_EXPAND_ENABLE
1016 retry:
1017 #endif
1018     allocNode = OsMemFreeNodeGet(pool, allocSize);
1019     if (allocNode == NULL) {
1020 #if OS_MEM_EXPAND_ENABLE
1021         if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
1022             INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
1023             if (ret == 0) {
1024                 goto retry;
1025             }
1026         }
1027 #endif
1028         MEM_UNLOCK(pool, intSave);
1029         PRINT_ERR("---------------------------------------------------"
1030                   "--------------------------------------------------------\n");
1031         OsMemInfoPrint(pool);
1032         PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
1033         PRINT_ERR("----------------------------------------------------"
1034                   "-------------------------------------------------------\n");
1035         MEM_LOCK(pool, intSave);
1036         return NULL;
1037     }
1038 
1039     if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {
1040         OsMemSplitNode(pool, allocNode, allocSize);
1041     }
1042 
1043     OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
1044     OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));
1045 
1046 #ifdef LOSCFG_MEM_LEAKCHECK
1047     OsMemLinkRegisterRecord(allocNode);
1048 #endif
1049     return OsMemCreateUsedNode((VOID *)allocNode);
1050 }
1051 
1052 VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
1053 {
1054     if ((pool == NULL) || (size == 0)) {
1055         return (size > 0) ? OsVmBootMemAlloc(size) : NULL;
1056     }
1057 
1058     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1059         size = OS_MEM_MIN_ALLOC_SIZE;
1060     }
1061 
1062     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1063     VOID *ptr = NULL;
1064     UINT32 intSave;
1065 
1066     do {
1067         if (OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1068             break;
1069         }
1070         MEM_LOCK(poolHead, intSave);
1071         ptr = OsMemAlloc(poolHead, size, intSave);
1072         MEM_UNLOCK(poolHead, intSave);
1073     } while (0);
1074 
1075     OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
1076     return ptr;
1077 }
1078 
1079 VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
1080 {
1081     UINT32 gapSize;
1082 
1083     if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
1084         !OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
1085         return NULL;
1086     }
1087 
1088     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1089         size = OS_MEM_MIN_ALLOC_SIZE;
1090     }
1091 
1092     /*
1093      * sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
1094      * the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
1095      * offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
1096      */
1097     if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
1098         return NULL;
1099     }
1100 
1101     UINT32 useSize = (size + boundary) - sizeof(gapSize);
1102     if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
1103         return NULL;
1104     }
1105 
1106     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1107     UINT32 intSave;
1108     VOID *ptr = NULL;
1109     VOID *alignedPtr = NULL;
1110 
1111     do {
1112         MEM_LOCK(poolHead, intSave);
1113         ptr = OsMemAlloc(pool, useSize, intSave);
1114         MEM_UNLOCK(poolHead, intSave);
1115         alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
1116         if (ptr == alignedPtr) {
1117 #ifdef LOSCFG_KERNEL_LMS
1118             OsLmsAllocAlignMark(ptr, alignedPtr, size);
1119 #endif
1120             break;
1121         }
1122 
1123         /* Store gapSize just below alignedPtr (at alignedPtr - sizeof(gapSize)); it is checked when the block is freed. */
1124         gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
1125         struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
1126         OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
1127         OS_MEM_NODE_SET_ALIGNED_FLAG(gapSize);
1128         *(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
1129 #ifdef LOSCFG_KERNEL_LMS
1130         OsLmsAllocAlignMark(ptr, alignedPtr, size);
1131 #endif
1132         ptr = alignedPtr;
1133     } while (0);
1134 
1135     OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);
1136     return ptr;
1137 }
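/*
 * Aligned allocation trick used above: useSize = size + boundary - sizeof(gapSize) is
 * requested, the returned pointer is rounded up to the boundary, and the offset
 * (gapSize, tagged with the aligned flag) is stored in the 4 bytes just before the
 * aligned pointer so that LOS_MemFree can locate the real node header.
 */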
1138 
1139 STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
1140 {
1141     UINT32 size;
1142 
1143     /* First node prev is NULL */
1144     if (addr == NULL) {
1145         return TRUE;
1146     }
1147 
1148     size = pool->info.totalSize;
1149     if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
1150         return TRUE;
1151     }
1152 #if OS_MEM_EXPAND_ENABLE
1153     struct OsMemNodeHead *node = NULL;
1154     struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
1155     while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
1156         size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
1157         node = OsMemSentinelNodeGet(sentinel);
1158         sentinel = OS_MEM_END_NODE(node, size);
1159         if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
1160             return TRUE;
1161         }
1162     }
1163 #endif
1164     return FALSE;
1165 }
1166 
1167 STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
1168                                     const struct OsMemNodeHead *endNode,
1169                                     const struct OsMemPoolHead *poolInfo)
1170 {
1171     if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
1172         return FALSE;
1173     }
1174 
1175     if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1176         if (!OS_MEM_MAGIC_VALID(node)) {
1177             return FALSE;
1178         }
1179         return TRUE;
1180     }
1181 
1182     if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
1183         return FALSE;
1184     }
1185 
1186     return TRUE;
1187 }
1188 
1189 STATIC  BOOL MemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node,
1190                               const struct OsMemNodeHead *startNode, const struct OsMemNodeHead *endNode)
1191 {
1192     if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
1193         return FALSE;
1194     }
1195 
1196     if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1197         return FALSE;
1198     }
1199 
1200     const struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node);
1201     if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
1202         return FALSE;
1203     }
1204 
1205     if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
1206         if (nextNode->ptr.prev != node) {
1207             return FALSE;
1208         }
1209     }
1210 
1211     if ((node != startNode) &&
1212         ((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
1213         (OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
1214         return FALSE;
1215     }
1216 
1217     return TRUE;
1218 }
1219 
1220 STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
1221 {
1222     struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
1223     struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
1224     BOOL doneFlag = FALSE;
1225 
1226     do {
1227         doneFlag = MemCheckUsedNode(pool, node, startNode, endNode);
1228         if (!doneFlag) {
1229 #if OS_MEM_EXPAND_ENABLE
1230             if (OsMemIsLastSentinelNode(endNode) == FALSE) {
1231                 startNode = OsMemSentinelNodeGet(endNode);
1232                 endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
1233                 continue;
1234             }
1235 #endif
1236             return LOS_NOK;
1237         }
1238     } while (!doneFlag);
1239 
1240     return LOS_OK;
1241 }
1242 
1243 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
1244 {
1245     UINT32 ret = OsMemCheckUsedNode(pool, node);
1246     if (ret != LOS_OK) {
1247         PRINT_ERR("OsMemFree check error!\n");
1248         return ret;
1249     }
1250 
1251 #ifdef LOSCFG_MEM_WATERLINE
1252     pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1253 #endif
1254 
1255     node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1256 #ifdef LOSCFG_MEM_LEAKCHECK
1257     OsMemLinkRegisterRecord(node);
1258 #endif
1259 #ifdef LOSCFG_KERNEL_LMS
1260     struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
1261     struct OsMemNodeHead *curNodeBackup = node;
1262     if (g_lms != NULL) {
1263         g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
1264     }
1265 #endif
1266     struct OsMemNodeHead *preNode = node->ptr.prev; /* merge the previous node */
1267     if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1268         OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
1269         OsMemMergeNode(node);
1270         node = preNode;
1271     }
1272 
1273     struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge the next node */
1274     if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
1275         OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
1276         OsMemMergeNode(nextNode);
1277     }
1278 
1279 #if OS_MEM_EXPAND_ENABLE
1280     if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
1281         /* If this is an expanded region's head node and it is entirely unused, free it back to the physical memory manager (pmm). */
1282         if ((node->ptr.prev != NULL) && (node->ptr.prev > node)) {
1283             if (TryShrinkPool(pool, node)) {
1284                 return LOS_OK;
1285             }
1286         }
1287     }
1288 #endif
1289     OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
1290 #ifdef LOSCFG_KERNEL_LMS
1291     if (g_lms != NULL) {
1292         g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
1293     }
1294 #endif
1295     return ret;
1296 }
1297 
1298 UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
1299 {
1300     UINT32 intSave;
1301     UINT32 ret = LOS_NOK;
1302 
1303     if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
1304         !OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
1305         return ret;
1306     }
1307     OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);
1308 
1309     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1310     struct OsMemNodeHead *node = NULL;
1311 
1312     do {
1313         UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));
1314         if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
1315             PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
1316             break;
1317         }
1318 
1319         node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
1320 
1321         if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
1322             gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
1323             if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) || (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE))) {
1324                 PRINT_ERR("illegal gapSize: 0x%x\n", gapSize);
1325                 break;
1326             }
1327             node = (struct OsMemNodeHead *)((UINTPTR)ptr - gapSize - OS_MEM_NODE_HEAD_SIZE);
1328         }
1329         MEM_LOCK(poolHead, intSave);
1330         ret = OsMemFree(poolHead, node);
1331         MEM_UNLOCK(poolHead, intSave);
1332     } while (0);
1333 
1334     return ret;
1335 }
1336 
1337 STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
1338 {
1339 #ifdef LOSCFG_MEM_WATERLINE
1340     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1341 #endif
1342     node->sizeAndFlag = nodeSize;
1343     if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= nodeSize) {
1344         OsMemSplitNode(pool, node, allocSize);
1345         OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
1346 #ifdef LOSCFG_MEM_WATERLINE
1347         poolInfo->info.curUsedSize -= nodeSize - allocSize;
1348 #endif
1349 #ifdef LOSCFG_KERNEL_LMS
1350         OsLmsReallocSplitNodeMark(node);
1351     } else {
1352         OsLmsReallocResizeMark(node, allocSize);
1353 #endif
1354     }
1355     OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
1356 #ifdef LOSCFG_MEM_LEAKCHECK
1357     OsMemLinkRegisterRecord(node);
1358 #endif
1359 }
1360 
1361 STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
1362                                                   UINT32 nodeSize, struct OsMemNodeHead *nextNode)
1363 {
1364     node->sizeAndFlag = nodeSize;
1365     OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
1366     OsMemMergeNode(nextNode);
1367 #ifdef LOSCFG_KERNEL_LMS
1368     OsLmsReallocMergeNodeMark(node);
1369 #endif
1370     if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= node->sizeAndFlag) {
1371         OsMemSplitNode(pool, node, allocSize);
1372 #ifdef LOSCFG_KERNEL_LMS
1373         OsLmsReallocSplitNodeMark(node);
1374     } else {
1375         OsLmsReallocResizeMark(node, allocSize);
1376 #endif
1377     }
1378     OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
1379     OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, node->sizeAndFlag - nodeSize);
1380 #ifdef LOSCFG_MEM_LEAKCHECK
1381     OsMemLinkRegisterRecord(node);
1382 #endif
1383 }
1384 
1385 STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
1386 {
1387     VOID *realPtr = ptr;
1388     UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));
1389 
1390     if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
1391         PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
1392         return NULL;
1393     }
1394     if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
1395         gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
1396         if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
1397             (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
1398             PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
1399             return NULL;
1400         }
1401         realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
1402     }
1403     return realPtr;
1404 }
1405 
1406 STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
1407                                  struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
1408 {
1409     struct OsMemNodeHead *nextNode = NULL;
1410     UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
1411     UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1412     VOID *tmpPtr = NULL;
1413 
1414     if (nodeSize >= allocSize) {
1415         OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
1416         return (VOID *)ptr;
1417     }
1418 
1419     nextNode = OS_MEM_NEXT_NODE(node);
1420     if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
1421         ((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
1422         OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
1423         return (VOID *)ptr;
1424     }
1425 
1426     tmpPtr = OsMemAlloc(pool, size, intSave);
1427     if (tmpPtr != NULL) {
1428         if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
1429             MEM_UNLOCK(pool, intSave);
1430             (VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
1431             MEM_LOCK(pool, intSave);
1432             return NULL;
1433         }
1434         (VOID)OsMemFree(pool, node);
1435     }
1436     return tmpPtr;
1437 }
1438 
1439 VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
1440 {
1441     if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1442         return NULL;
1443     }
1444     OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);
1445     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1446         size = OS_MEM_MIN_ALLOC_SIZE;
1447     }
1448 
1449     if (ptr == NULL) {
1450         return LOS_MemAlloc(pool, size);
1451     }
1452 
1453     if (size == 0) {
1454         (VOID)LOS_MemFree(pool, ptr);
1455         return NULL;
1456     }
1457 
1458     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1459     struct OsMemNodeHead *node = NULL;
1460     VOID *newPtr = NULL;
1461     UINT32 intSave;
1462 
1463     MEM_LOCK(poolHead, intSave);
1464     do {
1465         ptr = OsGetRealPtr(pool, ptr);
1466         if (ptr == NULL) {
1467             break;
1468         }
1469 
1470         node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
1471         if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
1472             break;
1473         }
1474 
1475         newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
1476     } while (0);
1477     MEM_UNLOCK(poolHead, intSave);
1478 
1479     return newPtr;
1480 }
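/*
 * Usage sketch (buf and newBuf are hypothetical): growing a buffer allocated
 * from the default system pool. On failure the old block stays valid, so the
 * caller still owns it. Note that size is clamped to OS_MEM_MIN_ALLOC_SIZE
 * before the zero-size check above, so a zero size behaves like a minimum-size
 * request rather than a free.
 *
 *     VOID *buf = LOS_MemAlloc(m_aucSysMem0, 64);
 *     VOID *newBuf = LOS_MemRealloc(m_aucSysMem0, buf, 256);
 *     if (newBuf == NULL) {
 *         (VOID)LOS_MemFree(m_aucSysMem0, buf);
 *     }
 */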
1481 
1482 #if OS_MEM_FREE_BY_TASKID
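/*
 * Walks every node in the pool, following sentinel nodes into expanded regions,
 * and frees each used node whose recorded owner matches taskID. Only compiled
 * in when OS_MEM_FREE_BY_TASKID is enabled.
 */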
1483 UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
1484 {
1485     if (pool == NULL) {
1486         return OS_ERROR;
1487     }
1488 
1489     if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1490         return OS_ERROR;
1491     }
1492 
1493     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1494     struct OsMemNodeHead *tmpNode = NULL;
1495     struct OsMemUsedNodeHead *node = NULL;
1496     struct OsMemNodeHead *endNode = NULL;
1497     UINT32 size;
1498     UINT32 intSave;
1499 
1500     MEM_LOCK(poolHead, intSave);
1501     endNode = OS_MEM_END_NODE(pool, poolHead->info.totalSize);
1502     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
1503         if (tmpNode == endNode) {
1504             if (OsMemIsLastSentinelNode(endNode) == FALSE) {
1505                 size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
1506                 tmpNode = OsMemSentinelNodeGet(endNode);
1507                 endNode = OS_MEM_END_NODE(tmpNode, size);
1508                 continue;
1509             } else {
1510                 break;
1511             }
1512         } else {
1513             if (!OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
1514                 tmpNode = OS_MEM_NEXT_NODE(tmpNode);
1515                 continue;
1516             }
1517 
1518             node = (struct OsMemUsedNodeHead *)tmpNode;
1519             tmpNode = OS_MEM_NEXT_NODE(tmpNode);
1520 
1521             if (node->taskID == taskID) {
1522                 OsMemFree(poolHead, &node->header);
1523             }
1524         }
1525     }
1526     MEM_UNLOCK(poolHead, intSave);
1527 
1528     return LOS_OK;
1529 }
1530 #endif
1531 
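/*
 * Returns the total pool size in bytes: the size recorded at init plus, when
 * expansion is enabled, the size of every region linked through the chain of
 * sentinel nodes.
 */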
1532 UINT32 LOS_MemPoolSizeGet(const VOID *pool)
1533 {
1534     UINT32 count = 0;
1535 
1536     if (pool == NULL) {
1537         return LOS_NOK;
1538     }
1539 
1540     count += ((struct OsMemPoolHead *)pool)->info.totalSize;
1541 
1542 #if OS_MEM_EXPAND_ENABLE
1543     UINT32 size;
1544     struct OsMemNodeHead *node = NULL;
1545     struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);
1546 
1547     while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
1548         size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
1549         node = OsMemSentinelNodeGet(sentinel);
1550         sentinel = OS_MEM_END_NODE(node, size);
1551         count += size;
1552     }
1553 #endif
1554     return count;
1555 }
1556 
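/*
 * Sums the sizes of all used nodes under the pool lock. With expansion enabled
 * the sentinel node heads are counted as used as well and the walk continues
 * into each expanded region.
 */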
1557 UINT32 LOS_MemTotalUsedGet(VOID *pool)
1558 {
1559     struct OsMemNodeHead *tmpNode = NULL;
1560     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1561     struct OsMemNodeHead *endNode = NULL;
1562     UINT32 memUsed = 0;
1563     UINT32 intSave;
1564 
1565     if (pool == NULL) {
1566         return LOS_NOK;
1567     }
1568 
1569     MEM_LOCK(poolInfo, intSave);
1570     endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
1571 #if OS_MEM_EXPAND_ENABLE
1572     UINT32 size;
1573     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
1574         if (tmpNode == endNode) {
1575             memUsed += OS_MEM_NODE_HEAD_SIZE;
1576             if (OsMemIsLastSentinelNode(endNode) == FALSE) {
1577                 size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
1578                 tmpNode = OsMemSentinelNodeGet(endNode);
1579                 endNode = OS_MEM_END_NODE(tmpNode, size);
1580                 continue;
1581             } else {
1582                 break;
1583             }
1584         } else {
1585             if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
1586                 memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
1587             }
1588             tmpNode = OS_MEM_NEXT_NODE(tmpNode);
1589         }
1590     }
1591 #else
1592     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;) {
1593         if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
1594             memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
1595         }
1596         tmpNode = OS_MEM_NEXT_NODE(tmpNode);
1597     }
1598 #endif
1599     MEM_UNLOCK(poolInfo, intSave);
1600 
1601     return memUsed;
1602 }
1603 
1604 STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
1605 {
1606     PRINT_ERR("[%s], %d, memory check error!\n"
1607               "memory used but magic num wrong, magic num = %#x\n",
1608               __FUNCTION__, __LINE__, (*tmpNode)->magic);
1609 }
1610 
1611 STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
1612 {
1613     if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
1614         PRINT_ERR("[%s], %d, memory check error!\n"
1615                   " freeNode.prev:%#x is out of legal mem range\n",
1616                   __FUNCTION__, __LINE__, (*tmpNode)->prev);
1617         return LOS_NOK;
1618     }
1619     if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
1620         PRINT_ERR("[%s], %d, memory check error!\n"
1621                   " freeNode.next:%#x is out of legal mem range\n",
1622                   __FUNCTION__, __LINE__, (*tmpNode)->next);
1623         return LOS_NOK;
1624     }
1625     return LOS_OK;
1626 }
1627 
1628 STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool,
1629                                      const struct OsMemNodeHead *endNode)
1630 {
1631     if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
1632         OsMemMagicCheckPrint(tmpNode);
1633         return LOS_NOK;
1634     }
1635 
1636     if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* free node: check that its list links stay within the pool */
1637         if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
1638             return LOS_NOK;
1639         }
1640     }
1641     return LOS_OK;
1642 }
1643 
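/*
 * A free-list entry is considered sane when the node itself, its list links and
 * its back pointer to the previous physical node all lie inside the pool and
 * are pointer-aligned.
 */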
1644 STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
1645                                      const struct OsMemFreeNodeHead *node)
1646 {
1647     if (!OsMemAddrValidCheck(pool, node) ||
1648         !OsMemAddrValidCheck(pool, node->prev) ||
1649         !OsMemAddrValidCheck(pool, node->next) ||
1650         !OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
1651         return LOS_NOK;
1652     }
1653 
1654     if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
1655         !OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
1656         !OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
1657         !OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
1658         return LOS_NOK;
1659     }
1660 
1661     return LOS_OK;
1662 }
1663 
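/*
 * Validates the pool header (self pointer and alignment) and every entry on
 * every free list; on the first bad entry it reports the pool layout and, when
 * expansion is enabled, each expanded region.
 */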
1664 STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
1665 {
1666     struct OsMemFreeNodeHead *tmpNode = NULL;
1667     UINT32 index;
1668     UINT32 flag = 0;
1669 
1670     if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
1671         PRINT_ERR("wrong mem pool addr: %#x, func:%s, line:%d\n", pool, __FUNCTION__, __LINE__);
1672         return;
1673     }
1674 
1675     for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
1676         for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
1677             if (OsMemFreeListNodeCheck(pool, tmpNode)) {
1678                 flag = 1;
1679                 PRINT_ERR("FreeListIndex: %u, node: %#x, bNode: %#x, prev: %#x, next: %#x\n",
1680                           index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
1681                 goto OUT;
1682             }
1683         }
1684     }
1685 
1686 OUT:
1687     if (flag) {
1688         PRINTK("mem pool info: poolAddr: %#x, poolSize: 0x%x\n", pool, pool->info.totalSize);
1689 #ifdef LOSCFG_MEM_WATERLINE
1690         PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
1691                pool->info.curUsedSize);
1692 #endif
1693 #if OS_MEM_EXPAND_ENABLE
1694         UINT32 size;
1695         struct OsMemNodeHead *node = NULL;
1696         struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
1697         while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
1698             size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
1699             node = OsMemSentinelNodeGet(sentinel);
1700             sentinel = OS_MEM_END_NODE(node, size);
1701             PRINTK("expand node info: nodeAddr: %#x, nodeSize: 0x%x\n", node, size);
1702         }
1703 #endif
1704     }
1705 }
1706 
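/*
 * Walks all nodes from the first node to the end sentinel, following expanded
 * regions when enabled. Each node's magic is checked and, for free nodes, the
 * list links are range-checked. On failure *tmpNode points at the broken node
 * and *preNode at the last node that still looked intact.
 */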
1707 STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
1708                                   struct OsMemNodeHead **preNode)
1709 {
1710     struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
1711 
1712     OsMemPoolHeadCheck(pool);
1713 
1714     *preNode = OS_MEM_FIRST_NODE(pool);
1715     do {
1716         for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
1717             if (OsMemIntegrityCheckSub(tmpNode, pool, endNode) == LOS_NOK) {
1718                 return LOS_NOK;
1719             }
1720             *preNode = *tmpNode;
1721         }
1722 #if OS_MEM_EXPAND_ENABLE
1723         if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
1724             *preNode = OsMemSentinelNodeGet(*tmpNode);
1725             endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
1726         } else
1727 #endif
1728         {
1729             break;
1730         }
1731     } while (1);
1732     return LOS_OK;
1733 }
1734 
1735 STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
1736                           const struct OsMemNodeHead *preNode)
1737 {
1738     struct OsMemUsedNodeHead *usedNode = NULL;
1739     struct OsMemFreeNodeHead *freeNode = NULL;
1740 
1741     if (tmpNode == preNode) {
1742         PRINTK("\n the broken node is the first node\n");
1743     }
1744 
1745     if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
1746         usedNode = (struct OsMemUsedNodeHead *)tmpNode;
1747         PRINTK("\n broken node head: %#x  %#x  %#x, ",
1748                usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
1749     } else {
1750         freeNode = (struct OsMemFreeNodeHead *)tmpNode;
1751         PRINTK("\n broken node head: %#x  %#x  %#x  %#x, %#x",
1752                freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
1753                freeNode->header.sizeAndFlag);
1754     }
1755 
1756     if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1757         usedNode = (struct OsMemUsedNodeHead *)preNode;
1758         PRINTK("prev node head: %#x  %#x  %#x\n",
1759                usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
1760     } else {
1761         freeNode = (struct OsMemFreeNodeHead *)preNode;
1762         PRINTK("prev node head: %#x  %#x  %#x  %#x, %#x",
1763                freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
1764                freeNode->header.sizeAndFlag);
1765     }
1766 
1767 #ifdef LOSCFG_MEM_LEAKCHECK
1768     OsMemNodeBacktraceInfo(tmpNode, preNode);
1769 #endif
1770 
1771     PRINTK("\n---------------------------------------------\n");
1772     PRINTK(" dump mem tmpNode:%#x ~ %#x\n", tmpNode, ((UINTPTR)tmpNode + OS_MEM_NODE_DUMP_SIZE));
1773     OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, (UINTPTR)tmpNode);
1774     PRINTK("\n---------------------------------------------\n");
1775     if (preNode != tmpNode) {
1776         PRINTK(" dump mem :%#x ~ tmpNode:%#x\n", ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE), tmpNode);
1777         OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE));
1778         PRINTK("\n---------------------------------------------\n");
1779     }
1780 }
1781 
1782 STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
1783                                      const struct OsMemNodeHead *tmpNode,
1784                                      const struct OsMemNodeHead *preNode,
1785                                      UINT32 intSave)
1786 {
1787     OsMemNodeInfo(tmpNode, preNode);
1788 
1789 #if OS_MEM_FREE_BY_TASKID
1790     LosTaskCB *taskCB = NULL;
1791     if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1792         struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
1793         UINT32 taskID = usedNode->taskID;
1794         if (OS_TID_CHECK_INVALID(taskID)) {
1795             MEM_UNLOCK(pool, intSave);
1796             LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
1797             return;
1798         }
1799 
1800         taskCB = OS_TCB_FROM_TID(taskID);
1801         if (OsTaskIsUnused(taskCB) || (taskCB->taskEntry == NULL)) {
1802             MEM_UNLOCK(pool, intSave);
1803             LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
1804             return;
1805         }
1806     } else {
1807         PRINTK("The prev node is free\n");
1808     }
1809     MEM_UNLOCK(pool, intSave);
1810     LOS_Panic("cur node: %#x\npre node: %#x\npre node was allocated by task:%s\n",
1811               tmpNode, preNode, taskCB->taskName);
1812 #else
1813     MEM_UNLOCK(pool, intSave);
1814     LOS_Panic("Memory integrity check error, cur node: %#x, pre node: %#x\n", tmpNode, preNode);
1815 #endif
1816 }
1817 
1818 #ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
1819 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
1820 {
1821     struct OsMemNodeHead *tmpNode = NULL;
1822     struct OsMemNodeHead *preNode = NULL;
1823 
1824     if (OsMemIntegrityCheck(pool, &tmpNode, &preNode)) {
1825         OsMemIntegrityCheckError(pool, tmpNode, preNode, intSave);
1826         return LOS_NOK;
1827     }
1828     return LOS_OK;
1829 }
1830 #endif
1831 
1832 UINT32 LOS_MemIntegrityCheck(const VOID *pool)
1833 {
1834     if (pool == NULL) {
1835         return LOS_NOK;
1836     }
1837 
1838     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1839     struct OsMemNodeHead *tmpNode = NULL;
1840     struct OsMemNodeHead *preNode = NULL;
1841     UINT32 intSave = 0;
1842 
1843     MEM_LOCK(poolHead, intSave);
1844     if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
1845         goto ERROR_OUT;
1846     }
1847     MEM_UNLOCK(poolHead, intSave);
1848     return LOS_OK;
1849 
1850 ERROR_OUT:
1851     OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
1852     return LOS_NOK;
1853 }
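/*
 * Usage sketch: a periodic sanity check on the default kernel pool. A detected
 * corruption does not normally return here, it panics inside
 * OsMemIntegrityCheckError with a dump of the broken node.
 *
 *     if (LOS_MemIntegrityCheck(m_aucSysMem0) != LOS_OK) {
 *         PRINT_ERR("kernel heap integrity check failed\n");
 *     }
 */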
1854 
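/* Accumulates one node's used/free size and node counts into poolStatus. */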
1855 STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node,
1856                                 LOS_MEM_POOL_STATUS *poolStatus)
1857 {
1858     UINT32 totalUsedSize = 0;
1859     UINT32 totalFreeSize = 0;
1860     UINT32 usedNodeNum = 0;
1861     UINT32 freeNodeNum = 0;
1862     UINT32 maxFreeSize = 0;
1863     UINT32 size;
1864 
1865     if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1866         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1867         ++freeNodeNum;
1868         totalFreeSize += size;
1869         if (maxFreeSize < size) {
1870             maxFreeSize = size;
1871         }
1872     } else {
1873         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1874         ++usedNodeNum;
1875         totalUsedSize += size;
1876     }
1877 
1878     poolStatus->totalUsedSize += totalUsedSize;
1879     poolStatus->totalFreeSize += totalFreeSize;
1880     poolStatus->maxFreeNodeSize = MAX(poolStatus->maxFreeNodeSize, maxFreeSize);
1881     poolStatus->usedNodeNum += usedNodeNum;
1882     poolStatus->freeNodeNum += freeNodeNum;
1883 }
1884 
1885 UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
1886 {
1887     struct OsMemPoolHead *poolInfo = pool;
1888 
1889     if (poolStatus == NULL) {
1890         PRINT_ERR("can't use NULL addr to save info\n");
1891         return LOS_NOK;
1892     }
1893 
1894     if ((pool == NULL) || (poolInfo->info.pool != pool)) {
1895         PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
1896         return LOS_NOK;
1897     }
1898 
1899     (VOID)memset(poolStatus, 0, sizeof(LOS_MEM_POOL_STATUS));
1900 
1901     struct OsMemNodeHead *tmpNode = NULL;
1902     struct OsMemNodeHead *endNode = NULL;
1903     UINT32 intSave;
1904 
1905     MEM_LOCK(poolInfo, intSave);
1906     endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
1907 #if OS_MEM_EXPAND_ENABLE
1908     UINT32 size;
1909     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
1910         if (tmpNode == endNode) {
1911             poolStatus->totalUsedSize += OS_MEM_NODE_HEAD_SIZE;
1912             poolStatus->usedNodeNum++;
1913             if (OsMemIsLastSentinelNode(endNode) == FALSE) {
1914                 size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
1915                 tmpNode = OsMemSentinelNodeGet(endNode);
1916                 endNode = OS_MEM_END_NODE(tmpNode, size);
1917                 continue;
1918             } else {
1919                 break;
1920             }
1921         } else {
1922             OsMemInfoGet(poolInfo, tmpNode, poolStatus);
1923         }
1924     }
1925 #else
1926     for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
1927         OsMemInfoGet(poolInfo, tmpNode, poolStatus);
1928     }
1929 #endif
1930 #ifdef LOSCFG_MEM_WATERLINE
1931     poolStatus->usageWaterLine = poolInfo->info.waterLine;
1932 #endif
1933     MEM_UNLOCK(poolInfo, intSave);
1934 
1935     return LOS_OK;
1936 }
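/*
 * Usage sketch: querying pool statistics; the fields shown follow
 * LOS_MEM_POOL_STATUS.
 *
 *     LOS_MEM_POOL_STATUS status = {0};
 *     if (LOS_MemInfoGet(m_aucSysMem0, &status) == LOS_OK) {
 *         PRINTK("used: 0x%x free: 0x%x max free node: 0x%x\n",
 *                status.totalUsedSize, status.totalFreeSize, status.maxFreeNodeSize);
 *     }
 */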
1937 
1938 STATIC VOID OsMemInfoPrint(VOID *pool)
1939 {
1940     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1941     LOS_MEM_POOL_STATUS status = {0};
1942 
1943     if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
1944         return;
1945     }
1946 
1947 #ifdef LOSCFG_MEM_WATERLINE
1948     PRINTK("pool addr          pool size    used size     free size    "
1949            "max free node size   used node num     free node num      UsageWaterLine\n");
1950     PRINTK("---------------    --------     -------       --------     "
1951            "--------------       -------------      ------------      ------------\n");
1952     PRINTK("%-16#x   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x    0x%-13x\n",
1953            poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
1954            status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
1955            status.freeNodeNum, status.usageWaterLine);
1956 #else
1957     PRINTK("pool addr          pool size    used size     free size    "
1958            "max free node size   used node num     free node num\n");
1959     PRINTK("---------------    --------     -------       --------     "
1960            "--------------       -------------      ------------\n");
1961     PRINTK("%-16#x   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x\n",
1962            poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
1963            status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
1964            status.freeNodeNum);
1965 #endif
1966 }
1967 
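/*
 * Prints how many free nodes sit on each non-empty free list. For the small
 * buckets the list index maps to a single size ((index + 1) * 4 bytes); for the
 * large buckets each power-of-two range is split into 1 << OS_MEM_SLI
 * sub-ranges, which is the size interval printed below.
 */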
1968 UINT32 LOS_MemFreeNodeShow(VOID *pool)
1969 {
1970     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1971 
1972     if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
1973         PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
1974         return LOS_NOK;
1975     }
1976 
1977     struct OsMemFreeNodeHead *node = NULL;
1978     UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
1979     UINT32 index;
1980     UINT32 intSave;
1981 
1982     MEM_LOCK(poolInfo, intSave);
1983     for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
1984         node = poolInfo->freeList[index];
1985         while (node) {
1986             node = node->next;
1987             countNum[index]++;
1988         }
1989     }
1990     MEM_UNLOCK(poolInfo, intSave);
1991 
1992     PRINTK("\n   ************************ left free node number**********************\n");
1993     for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
1994         if (countNum[index] == 0) {
1995             continue;
1996         }
1997 
1998         PRINTK("free index: %03u, ", index);
1999         if (index < OS_MEM_SMALL_BUCKET_COUNT) {
2000             PRINTK("size: [%#x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: shift by 2, the small-bucket step is 4 bytes. */
2001         } else {
2002             UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
2003             UINT32 offset = val >> OS_MEM_SLI;
2004             PRINTK("size: [%#x, %#x], num: %u\n",
2005                    (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
2006                    ((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
2007                    countNum[index]);
2008         }
2009     }
2010     PRINTK("\n   ********************************************************************\n\n");
2011 
2012     return LOS_OK;
2013 }
2014 
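/*
 * Carves the kernel heap out of the boot memory area, initializes it as a
 * memory pool and, when expansion is enabled, allows the pool to grow at run
 * time.
 */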
2015 STATUS_T OsKHeapInit(size_t size)
2016 {
2017     STATUS_T ret;
2018     VOID *ptr = NULL;
2019     /*
2020      * Round up to an MB boundary in order to set the kernel attributes: the kernel
2021      * text/code/data regions need page mapping while the remaining region uses section
2022      * mapping, so the boundary between them must be MB aligned.
2023      */
2024     UINTPTR end = ROUNDUP(g_vmBootMemBase + size, MB);
2025     size = end - g_vmBootMemBase;
2026 
2027     ptr = OsVmBootMemAlloc(size);
2028     if (!ptr) {
2029         PRINT_ERR("vmm_kheap_init boot_alloc_mem failed! %d\n", size);
2030         return -1;
2031     }
2032 
2033     m_aucSysMem0 = m_aucSysMem1 = ptr;
2034     ret = LOS_MemInit(m_aucSysMem0, size);
2035     if (ret != LOS_OK) {
2036         PRINT_ERR("vmm_kheap_init LOS_MemInit failed!\n");
2037         g_vmBootMemBase -= size;
2038         return ret;
2039     }
2040 #if OS_MEM_EXPAND_ENABLE
2041     LOS_MemExpandEnable(OS_SYS_MEM_ADDR);
2042 #endif
2043     return LOS_OK;
2044 }
2045 
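/*
 * Returns TRUE when ptr falls inside the kernel heap, checking the base pool
 * first and then, under the pool lock, every expanded region reached through
 * the sentinel chain.
 */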
2046 BOOL OsMemIsHeapNode(const VOID *ptr)
2047 {
2048     struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;
2049     struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
2050     struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
2051 
2052     if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
2053         return TRUE;
2054     }
2055 
2056 #if OS_MEM_EXPAND_ENABLE
2057     UINT32 intSave;
2058     UINT32 size;
2059     MEM_LOCK(pool, intSave);
2060     while (OsMemIsLastSentinelNode(endNode) == FALSE) {
2061         size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
2062         firstNode = OsMemSentinelNodeGet(endNode);
2063         endNode = OS_MEM_END_NODE(firstNode, size);
2064         if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
2065             MEM_UNLOCK(pool, intSave);
2066             return TRUE;
2067         }
2068     }
2069     MEM_UNLOCK(pool, intSave);
2070 #endif
2071     return FALSE;
2072 }
2073 
2074 
2075