• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3  * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without modification,
6  * are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice, this list of
9  *    conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12  *    of conditions and the following disclaimer in the documentation and/or other materials
13  *    provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16  *    to endorse or promote products derived from this software without specific prior written
17  *    permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "los_memory.h"
33 #include "securec.h"
34 #include "los_arch.h"
35 #include "los_config.h"
36 #include "los_debug.h"
37 #include "los_hook.h"
38 #include "los_interrupt.h"
39 #include "los_task.h"
40 #ifdef LOSCFG_KERNEL_LMS
41 #include "los_lms_pri.h"
42 #endif
43 #if (LOSCFG_KERNEL_LMK == 1)
44 #include "los_lmk.h"
45 #endif
46 
47 /* Used to cut non-essential functions. */
48 #define OS_MEM_EXPAND_ENABLE    0
49 
50 UINT8 *m_aucSysMem0 = NULL;
51 
52 #if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
53 STATIC UINT8 g_memStart[LOSCFG_SYS_HEAP_SIZE];
54 #endif
55 
56 #if (LOSCFG_MEM_MUL_POOL == 1)
57 VOID *g_poolHead = NULL;
58 #endif
59 
60 /* The following is the macro definition and interface implementation related to the TLSF. */
61 
62 #define OS_MEM_BITMAP_MASK 0x1FU
63 
64 /* Used to find the first bit of 1 in bitmap. */
OsMemFFS(UINT32 bitmap)65 STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
66 {
67     bitmap &= ~bitmap + 1;
68     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
69 }
70 
71 /* Used to find the last bit of 1 in bitmap. */
OsMemFLS(UINT32 bitmap)72 STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
73 {
74     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
75 }
76 
OsMemLog2(UINT32 size)77 STATIC INLINE UINT32 OsMemLog2(UINT32 size)
78 {
79     return (size > 0) ? OsMemFLS(size) : 0;
80 }
81 
82 /* Get the first level: f = log2(size). */
OsMemFlGet(UINT32 size)83 STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
84 {
85     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
86         return ((size >> 2) - 1); /* 2: The small bucket setup is 4. */
87     }
88     return (OsMemLog2(size) - OS_MEM_LARGE_START_BUCKET + OS_MEM_SMALL_BUCKET_COUNT);
89 }
90 
/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
    /* Second-level indexing only applies to the large (log2-based) buckets. */
    if ((fl < OS_MEM_SMALL_BUCKET_COUNT) || (size < OS_MEM_SMALL_BUCKET_MAX_SIZE)) {
        PRINT_ERR("fl or size is too small, fl = %u, size = %u\n", fl, size);
        return 0;
    }

    /* Keep the OS_MEM_SLI bits just below the leading bit, then drop the leading-bit term. */
    UINT32 sl = (size << OS_MEM_SLI) >> (fl - OS_MEM_SMALL_BUCKET_COUNT + OS_MEM_LARGE_START_BUCKET);
    return (sl - (1 << OS_MEM_SLI));
}
102 
103 /* The following is the memory algorithm related macro definition and interface implementation. */
104 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1 && (LOSCFG_BASE_CORE_TSK_LIMIT + 1) > 64)
105 #error "When enter here, LOSCFG_BASE_CORE_TSK_LIMIT larger than 63 is not support"
106 #endif
107 
108 struct OsMemUsedNodeHead {
109     struct OsMemNodeHead header;
110 };
111 
112 /* The memory pool support expand. */
113 #define OS_MEM_POOL_EXPAND_ENABLE   0x01
114 /* The memory pool support no lock. */
115 #define OS_MEM_POOL_UNLOCK_ENABLE   0x02
116 
/*
 * Lock/unlock the pool by masking interrupts, unless the pool was created
 * with OS_MEM_POOL_UNLOCK_ENABLE (lock-free pool).
 * Fix: no trailing ';' after "while (0)" — the swallow-the-semicolon idiom
 * requires the call site to supply the ';', so the macros behave as a single
 * statement and stay safe in unbraced if/else bodies.
 */
#define MEM_LOCK(pool, state)       do {                    \
    if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
        (state) = LOS_IntLock();                            \
    }                                                       \
} while (0)
#define MEM_UNLOCK(pool, state)     do {                    \
    if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
        LOS_IntRestore(state);                              \
    }                                                       \
} while (0)
127 
128 #define OS_MEM_NODE_MAGIC          0xABCDDCBA
129 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1)
130 #define OS_MEM_NODE_USED_FLAG      (1U << 25)
131 #define OS_MEM_NODE_ALIGNED_FLAG   (1U << 24)
132 #if (LOSCFG_MEM_LEAKCHECK == 1)
133 #define OS_MEM_NODE_LEAK_FLAG      (1U << 23)
134 #else
135 #define OS_MEM_NODE_LEAK_FLAG      0
136 #endif
137 #if (OS_MEM_EXPAND_ENABLE == 1)
138 #define OS_MEM_NODE_LAST_FLAG      (1U << 22)  /* Sentinel Node */
139 #else
140 #define OS_MEM_NODE_LAST_FLAG      0
141 #endif
142 #else
143 #define OS_MEM_NODE_USED_FLAG      (1U << 31)
144 #define OS_MEM_NODE_ALIGNED_FLAG   (1U << 30)
145 #if (LOSCFG_MEM_LEAKCHECK == 1)
146 #define OS_MEM_NODE_LEAK_FLAG      (1U << 29)
147 #else
148 #define OS_MEM_NODE_LEAK_FLAG      0
149 #endif
150 #if (OS_MEM_EXPAND_ENABLE == 1)
151 #define OS_MEM_NODE_LAST_FLAG      (1U << 28)  /* Sentinel Node */
152 #else
153 #define OS_MEM_NODE_LAST_FLAG      0
154 #endif
155 #endif
156 
157 #define OS_MEM_NODE_ALIGNED_AND_USED_FLAG \
158     (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LEAK_FLAG | OS_MEM_NODE_LAST_FLAG)
159 
160 #define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
161             ((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
162 #define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
163             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG)
164 #define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
165             ((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
166 #define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
167             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG)
168 #define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
169             ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
170 
171 #define OS_MEM_GAPSIZE_USED_FLAG      0x80000000U
172 #define OS_MEM_GAPSIZE_ALIGNED_FLAG   0x40000000U
173 #define OS_MEM_GET_ALIGNED_GAPSIZE(gapsize) \
174             ((gapsize) & ~OS_MEM_GAPSIZE_ALIGNED_FLAG)
175 #define OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) \
176                 ((gapsize) & OS_MEM_GAPSIZE_ALIGNED_FLAG)
177 #define OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapsize) \
178                 (gapsize) = ((gapsize) | OS_MEM_GAPSIZE_ALIGNED_FLAG)
179 #define OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize) \
180                 ((gapsize) & OS_MEM_GAPSIZE_USED_FLAG)
181 #define OS_MEM_GAPSIZE_CHECK(gapsize) \
182                 (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) && \
183                  OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize))
184 
185 #define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
186             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG)
187 #define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
188             ((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)
189 #define OS_MEM_NODE_GET_LEAK_FLAG(sizeAndFlag) \
190             ((sizeAndFlag) & OS_MEM_NODE_LEAK_FLAG)
191 #define OS_MEM_NODE_SET_LEAK_FLAG(sizeAndFlag) \
192             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LEAK_FLAG)
193 
194 #define OS_MEM_ALIGN_SIZE           sizeof(UINTPTR)
195 #define OS_MEM_IS_POW_TWO(value)    ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
196 #define OS_MEM_ALIGN(p, alignSize)  (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
197 #define OS_MEM_IS_ALIGNED(a, b)     (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
198 #define OS_MEM_NODE_HEAD_SIZE       sizeof(struct OsMemUsedNodeHead)
199 #define OS_MEM_MIN_POOL_SIZE        (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
200 #define OS_MEM_MIN_LEFT_SIZE        sizeof(struct OsMemFreeNodeHead)
201 #define OS_MEM_MIN_ALLOC_SIZE       8
202 #define OS_MEM_NEXT_NODE(node) \
203     ((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
204 #define OS_MEM_FIRST_NODE(pool) \
205     (struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
206 #define OS_MEM_END_NODE(pool, size) \
207     (struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
208 #define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
209     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
210 #define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
211     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
212 #if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
213 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
214 #define OS_MEM_SET_MAGIC(node)      ((node)->magic = OS_MEM_NODE_MAGIC)
215 #define OS_MEM_MAGIC_VALID(node)    ((node)->magic == OS_MEM_NODE_MAGIC)
216 #else
217 #define OS_MEM_SET_MAGIC(node)
218 #define OS_MEM_MAGIC_VALID(node)    TRUE
219 #endif
220 
221 #if (LOSCFG_MEM_MUL_REGIONS == 1)
222 /**
223  *  When LOSCFG_MEM_MUL_REGIONS is enabled to support multiple non-continuous memory regions,
224  *  the gap between two memory regions is marked as a used OsMemNodeHead node. The gap node
225  *  couldn't be freed, and would also be skipped in some DFX functions. The 'ptr.prev' pointer
226  *  of this node is set to OS_MEM_GAP_NODE_MAGIC to identify that this is a gap node.
227 */
228 #define OS_MEM_GAP_NODE_MAGIC       0xDCBAABCD
229 #define OS_MEM_MARK_GAP_NODE(node)  \
230     (((struct OsMemNodeHead *)(node))->ptr.prev = (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
231 #define OS_MEM_IS_GAP_NODE(node)    \
232     (((struct OsMemNodeHead *)(node))->ptr.prev == (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
233 #else
234 #define OS_MEM_MARK_GAP_NODE(node)
235 #define OS_MEM_IS_GAP_NODE(node)    FALSE
236 #endif
237 
238 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
239 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
240 STATIC VOID OsMemInfoPrint(VOID *pool);
241 
242 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
/* Record the current task ID in the node header so used memory can be
 * attributed (and optionally freed) per task. */
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
    node->header.taskID = LOS_CurTaskIDGet();
}
247 #endif
/*
 * Walk every node in pool (including expanded regions when OS_MEM_EXPAND_ENABLE
 * is on) and invoke handle(curNode, arg) on each non-sentinel node.
 * The walk is skipped if the pool pointer is NULL or the pool fails the
 * integrity check. The pool lock is held across the whole traversal.
 */
STATIC VOID OsAllMemNodeDoHandle(VOID *pool, VOID (*handle)(struct OsMemNodeHead *curNode, VOID *arg), VOID *arg)
{
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 intSave = 0;

    if (pool == NULL) {
        PRINTK("input param is NULL\n");
        return;
    }
    if (LOS_MemIntegrityCheck(pool)) {
        PRINTK("LOS_MemIntegrityCheck error\n");
        return;
    }

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        if (tmpNode == endNode) {
#if OS_MEM_EXPAND_ENABLE
            UINT32 size;
            /* Not the last region: jump to the next expanded region and keep walking. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            }
#endif
            break;
        }
        handle(tmpNode, arg);
    }
    MEM_UNLOCK(poolInfo, intSave);
}
283 
284 #if (LOSCFG_TASK_MEM_USED == 1)
/* Per-node callback: add a used node's size to its owner task's slot in the
 * statistics buffer packed into arg (see OsTaskMemUsed for the packing). */
STATIC VOID GetTaskMemUsedHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
    UINT32 *args = (UINT32 *)arg;
    UINT32 *tskMemInfoBuf = (UINT32 *)(UINTPTR)*args; /* args[0]: output buffer */
    UINT32 tskMemInfoCnt = *(args + 1);               /* args[1]: buffer capacity */
#ifndef LOSCFG_MEM_MUL_REGIONS
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
#else
    /* With multiple regions, gap nodes between regions must not be counted. */
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(curNode)) {
#endif
        if (curNode->taskID < tskMemInfoCnt) {
            tskMemInfoBuf[curNode->taskID] += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
        }
    }
    return;
}
301 
302 VOID OsTaskMemUsed(VOID *pool, UINT32 *tskMemInfoBuf, UINT32 tskMemInfoCnt)
303 {
304     UINT32 args[2] = {(UINT32)(UINTPTR)tskMemInfoBuf, tskMemInfoCnt};
305     OsAllMemNodeDoHandle(pool, GetTaskMemUsedHandle, (VOID *)args);
306     return;
307 }
308 #endif
309 
310 #if (LOSCFG_MEM_WATERLINE == 1)
/* Account size bytes as used and refresh the pool's high-water mark. */
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    pool->info.curUsedSize += size;
    if (pool->info.curUsedSize > pool->info.waterLine) {
        pool->info.waterLine = pool->info.curUsedSize;
    }
}
318 #else
/* Waterline statistics disabled: keep the call sites valid but do nothing. */
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    (VOID)pool;
    (VOID)size;
}
324 #endif
325 
326 #if OS_MEM_EXPAND_ENABLE
/* Follow the sentinel chain to the last sentinel node
 * (the one terminating the final expanded region). */
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
    struct OsMemNodeHead *node = NULL;
    VOID *ptr = sentinelNode->ptr.next;
    UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);

    /* A NULL next pointer or zero size marks the end of the chain. */
    while ((ptr != NULL) && (size != 0)) {
        node = OS_MEM_END_NODE(ptr, size);
        ptr = node->ptr.next;
        size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    }

    return node;
}
341 
342 STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
343 {
344     if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
345         return FALSE;
346     }
347 
348     if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
349         return FALSE;
350     }
351 
352     return TRUE;
353 }
354 
/* TRUE when sentinelNode terminates the sentinel chain (or is invalid);
 * FALSE when another expanded region follows it. */
STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
{
    if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
        PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
        return TRUE;
    }

    /* The last sentinel carries size 0 and a NULL next pointer. */
    if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
        (sentinelNode->ptr.next == NULL)) {
        return TRUE;
    }

    return FALSE;
}
369 
/* Record a new expanded region (newNode, size) in the pool's last sentinel
 * node and re-mark that sentinel as used and last. */
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
    /* Always write into the last sentinel of the chain. */
    if (sentinelNode->ptr.next != NULL) {
        sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
    }

    sentinelNode->sizeAndFlag = size;
    sentinelNode->ptr.next = newNode;
    OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
    OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
}
381 
382 STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
383 {
384     if (OsMemSentinelNodeCheck(node) == FALSE) {
385         return NULL;
386     }
387 
388     return node->ptr.next;
389 }
390 
/* Find the sentinel node whose 'next' points at node, i.e. the sentinel
 * preceding the expanded region that starts at node. NULL when not found. */
STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
{
    UINT32 nextSize;
    struct OsMemNodeHead *nextNode = NULL;
    struct OsMemNodeHead *sentinelNode = NULL;

    /* Start from the primary region's sentinel at the end of the pool. */
    sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
    while (sentinelNode != NULL) {
        if (OsMemIsLastSentinelNode(sentinelNode)) {
            PRINT_ERR("PreSentinelNodeGet can not find node 0x%x\n", node);
            return NULL;
        }
        nextNode = OsMemSentinelNodeGet(sentinelNode);
        if (nextNode == node) {
            return sentinelNode;
        }
        nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
        sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
    }

    return NULL;
}
413 
/* Release a fully-free expanded region back to the system. node must be the
 * region's first node; returns TRUE when the region was unlinked from the
 * sentinel chain and its pages were freed. */
STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *mySentinel = NULL;
    struct OsMemNodeHead *preSentinel = NULL;
    /* For a region's first node, ptr.prev points at the region's own sentinel. */
    size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
    size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);

    /* Only shrink when this single free node covers the whole region. */
    if (nodeSize != totalSize) {
        return FALSE;
    }

    preSentinel = PreSentinelNodeGet(pool, node);
    if (preSentinel == NULL) {
        return FALSE;
    }

    mySentinel = node->ptr.prev;
    if (OsMemIsLastSentinelNode(mySentinel)) { /* prev node becomes sentinel node */
        preSentinel->ptr.next = NULL;
        OsMemSentinelNodeSet(preSentinel, NULL, 0);
    } else {
        /* Splice this region out of the middle of the sentinel chain. */
        preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
        preSentinel->ptr.next = mySentinel->ptr.next;
    }

    if (OsMemLargeNodeFree(node) != LOS_OK) {
        PRINT_ERR("TryShrinkPool free 0x%x failed!\n", node);
        return FALSE;
    }

    return TRUE;
}
446 
/* Grow the pool with a new contiguous region of at least size bytes (rounded
 * up to whole pages, plus room for the region's sentinel). On allocation
 * failure, retries up to MAX_SHRINK_PAGECACHE_TRY times, dropping the pool
 * lock while shrinking system memory. Returns 0 on success, -1 on failure. */
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 size, UINT32 intSave)
{
    UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);

RETRY:
    newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
    if (newNode == NULL) {
        if (tryCount > 0) {
            tryCount--;
            /* Unlock while shrinking: OsTryShrinkMemory may take a while. */
            MEM_UNLOCK(poolInfo, intSave);
            OsTryShrinkMemory(size >> PAGE_SHIFT);
            MEM_LOCK(poolInfo, intSave);
            goto RETRY;
        }

        PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
        return -1;
    }
    /* The new region is one big free node followed by its own sentinel. */
    newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
    OsMemSentinelNodeSet(endNode, newNode, size);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    endNode = OS_MEM_END_NODE(newNode, size);
    (VOID)memset(endNode, 0, sizeof(*endNode));
    endNode->ptr.next = NULL;
    OS_MEM_SET_MAGIC(endNode);
    OsMemSentinelNodeSet(endNode, NULL, 0);
    /* Only the new sentinel header counts as used overhead. */
    OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);

    return 0;
}
485 
486 VOID LOS_MemExpandEnable(VOID *pool)
487 {
488     if (pool == NULL) {
489         return;
490     }
491 
492     ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
493 }
494 #endif
495 
496 #ifdef LOSCFG_KERNEL_LMS
/* Paint LMS shadow memory for a fresh pool: the pool header is painted, the
 * first node header and the end-node header become red zones, and the first
 * node's payload is marked as not-yet-allocated (after-free). */
STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
    g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_AFTERFREE_U8);
}
510 
/* Shadow-mark an aligned allocation: the gap between the raw pointer and the
 * aligned pointer becomes a red zone (with its stored gap-size word painted),
 * and everything past the requested size up to the next node is red zone. */
STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
{
    struct OsMemNodeHead *allocNode = NULL;

    if ((g_lms == NULL) || (ptr == NULL)) {
        return;
    }
    allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
    if (ptr != alignedPtr) {
        /* The first UINT32 of the gap stores the gap size; keep it readable. */
        g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
        g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
    }

    /* mark remaining as redzone */
    g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
        LMS_SHADOW_REDZONE_U8);
}
528 
/* After merging nodes during realloc, mark the whole payload accessible again. */
STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_ACCESSIBLE_U8);
}
538 
/* After splitting a node during realloc, mark the split-off node's header as
 * a red zone and its payload as freed. */
STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark next node */
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
        (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        (UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
}
550 
/* After shrinking a node in place during realloc, mark the bytes beyond the
 * new size (up to the next node) as a red zone. */
STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark remaining as redzone */
    g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
}
559 #endif
560 
561 #if (LOSCFG_MEM_LEAKCHECK == 1)
/* One leak-check record: a used node plus the call chain captured at
 * allocation time. */
struct OsMemLeakCheckInfo {
    struct OsMemNodeHead *node;                /* recorded used node */
    UINTPTR linkReg[LOSCFG_MEM_RECORD_LR_CNT]; /* saved link registers (backtrace) */
};

/* Ring buffer of leak-check records and the next write position. */
struct OsMemLeakCheckInfo g_leakCheckRecord[LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM] = {0};
STATIC UINT32 g_leakCheckRecordCnt = 0;
569 
/* Record node's backtrace into the ring buffer once: nodes already recorded
 * are skipped via the LEAK flag; the write index wraps around. */
STATIC INLINE VOID OsMemLeakCheckInfoRecord(struct OsMemNodeHead *node)
{
    struct OsMemLeakCheckInfo *info = &g_leakCheckRecord[g_leakCheckRecordCnt];

    if (!OS_MEM_NODE_GET_LEAK_FLAG(node->sizeAndFlag)) {
        info->node = node;
        (VOID)memcpy(info->linkReg, node->linkReg, sizeof(node->linkReg));
        OS_MEM_NODE_SET_LEAK_FLAG(node->sizeAndFlag);
        g_leakCheckRecordCnt++;
        if (g_leakCheckRecordCnt >= LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM) {
            g_leakCheckRecordCnt = 0; /* wrap: overwrite the oldest records */
        }
    }
}
584 
585 STATIC INLINE VOID OsMemLeakCheckInit(VOID)
586 {
587     (VOID)memset(g_leakCheckRecord, 0, sizeof(struct OsMemLeakCheckInfo) * LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM);
588     g_leakCheckRecordCnt = 0;
589 }
590 
/* Capture the current call chain into the node header for leak reporting. */
STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
{
    (VOID)memset(node->linkReg, 0, sizeof(node->linkReg));
    OsBackTraceHookCall(node->linkReg, LOSCFG_MEM_RECORD_LR_CNT, LOSCFG_MEM_OMIT_LR_CNT, 0);
}
596 
/* Print one used node's address, size and recorded backtrace, then record it
 * in the leak-check ring buffer. Gap nodes between regions are skipped. */
STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
{
    UINT32 count;

    if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(node)) {
        PRINTK("0x%x: 0x%x ", (UINTPTR)node, OS_MEM_NODE_GET_SIZE(node->sizeAndFlag));
        for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
            PRINTK(" 0x%x ", node->linkReg[count]);
        }
        PRINTK("\n");

        OsMemLeakCheckInfoRecord(node);
    }
}
611 
/* Adapter so OsMemUsedNodePrint can be passed to OsAllMemNodeDoHandle. */
STATIC VOID OsMemUsedNodePrintHandle(struct OsMemNodeHead *node, VOID *arg)
{
    UNUSED(arg);
    OsMemUsedNodePrint(node);
    return;
}
618 
/* Dump every used node of pool with its allocation backtrace: a header line
 * first, then one line per node via OsAllMemNodeDoHandle. */
VOID LOS_MemUsedNodeShow(VOID *pool)
{
    UINT32 count;

    PRINTK("\n\rnode          size    ");
    for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
        PRINTK("    LR[%u]   ", count);
    }
    PRINTK("\n");

    OsMemLeakCheckInit();
    OsAllMemNodeDoHandle(pool, OsMemUsedNodePrintHandle, NULL);
    return;
}
633 
634 #if (LOSCFG_KERNEL_PRINTF != 0)
635 STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
636                                    const struct OsMemNodeHead *preNode)
637 {
638     int i;
639     PRINTK("\n broken node head LR info: \n");
640     for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
641         PRINTK(" LR[%d]:0x%x\n", i, tmpNode->linkReg[i]);
642     }
643 
644     PRINTK("\n pre node head LR info: \n");
645     for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
646         PRINTK(" LR[%d]:0x%x\n", i, preNode->linkReg[i]);
647     }
648 }
649 #endif
650 #endif
651 
/* Map a block size to its TLSF free-list index: small sizes map directly to
 * a first-level bucket, larger sizes to a (first, second)-level pair. */
STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
{
    UINT32 fl = OsMemFlGet(size);
    if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
        return fl;
    }

    UINT32 sl = OsMemSlGet(size, fl);
    return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl);
}
662 
663 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
664                                         UINT32 index, UINT32 size)
665 {
666     struct OsMemFreeNodeHead *node = NULL;
667 
668     for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
669         if (node->header.sizeAndFlag >= size) {
670             return node;
671         }
672     }
673 
674     return NULL;
675 }
676 
/* Starting at index, find the first non-empty free list within the same
 * 32-entry bitmap word. Returns OS_MEM_FREE_LIST_COUNT when none exists. */
STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
{
    /* 5: Divide by 32 to calculate the index of the bitmap array. */
    UINT32 mask = poolHead->freeListBitmap[index >> 5];
    mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1); /* clear bits below index */
    if (mask != 0) {
        index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
        return index;
    }

    return OS_MEM_FREE_LIST_COUNT;
}
689 
/* Find a free list able to satisfy size: first try the lists at/above the
 * exact bucket within its bitmap word, then scan later bitmap words for any
 * non-empty list; as a last resort, first-fit scan the exact bucket itself.
 * *outIndex receives the chosen list index. Returns NULL when nothing fits. */
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 fl = OsMemFlGet(size);
    UINT32 sl;
    UINT32 index, tmp;
    UINT32 curIndex = OS_MEM_FREE_LIST_COUNT;
    UINT32 mask;

    do {
        if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
            index = fl;
        } else {
            sl = OsMemSlGet(size, fl);
            curIndex = ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
            /* Lists above curIndex are guaranteed to hold blocks >= size. */
            index = curIndex + 1;
        }

        tmp = OsMemNotEmptyIndexGet(poolHead, index);
        if (tmp != OS_MEM_FREE_LIST_COUNT) {
            index = tmp;
            goto DONE;
        }

        for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) {
            /* 5: Divide by 32 to calculate the index of the bitmap array. */
            mask = poolHead->freeListBitmap[index >> 5];
            if (mask != 0) {
                index = OsMemFFS(mask) + index;
                goto DONE;
            }
        }
    } while (0);

    if (curIndex == OS_MEM_FREE_LIST_COUNT) {
        return NULL;
    }

    /* Fall back to the exact bucket; its blocks must be scanned for fit. */
    *outIndex = curIndex;
    return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
DONE:
    *outIndex = index;
    return poolHead->freeList[index];
}
734 
735 STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
736 {
737     /* 5: Divide by 32 to calculate the index of the bitmap array. */
738     head->freeListBitmap[index >> 5] |= 1U << (index & 0x1f);
739 }
740 
741 STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
742 {
743     /* 5: Divide by 32 to calculate the index of the bitmap array. */
744     head->freeListBitmap[index >> 5] &= ~(1U << (index & 0x1f));
745 }
746 
/* Push node at the head of free list listIndex and mark the list non-empty. */
STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
    struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
    if (firstNode != NULL) {
        firstNode->prev = node;
    }
    node->prev = NULL;
    node->next = firstNode;
    pool->freeList[listIndex] = node;
    OsMemSetFreeListBit(pool, listIndex);
    OS_MEM_SET_MAGIC(&node->header);
}
759 
/* Unlink node from free list listIndex; clear the bitmap bit when the list
 * becomes empty. */
STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
    if (node == pool->freeList[listIndex]) {
        /* node is the list head: advance the head pointer. */
        pool->freeList[listIndex] = node->next;
        if (node->next == NULL) {
            OsMemClearFreeListBit(pool, listIndex);
        } else {
            node->next->prev = NULL;
        }
    } else {
        node->prev->next = node->next;
        if (node->next != NULL) {
            node->next->prev = node->prev;
        }
    }
    OS_MEM_SET_MAGIC(&node->header);
}
777 
/* Insert a free node into the list matching its size; an out-of-range index
 * means the node header is corrupted, which is fatal. */
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
{
    UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
    if (index >= OS_MEM_FREE_LIST_COUNT) {
        LOS_Panic("The index of free lists is error, index = %u\n", index);
    }
    OsMemListAdd(pool, index, node);
}
786 
787 STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
788 {
789     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
790     OsMemListDelete(pool, index, node);
791 }
792 
/* Take a free node of at least size bytes off its free list and return its
 * header, or NULL when no suitable block exists in the pool. */
STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 index;
    struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
    if (firstNode == NULL) {
        return NULL;
    }

    OsMemListDelete(poolHead, index, firstNode);

    return &firstNode->header;
}
806 
/* Merge node into its previous neighbour and fix the back pointer of the node
 * after the merged block. The caller must have unlinked both from free lists. */
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *nextNode = NULL;

    node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
    nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
    /* Sentinel and gap nodes keep their special prev/next fields untouched. */
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = node->ptr.prev;
    }
}
817 
/* Split allocNode at allocSize: the remainder becomes a new free node, which
 * is coalesced with the following node when that one is also free, and is
 * then inserted into the free lists. */
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
    struct OsMemFreeNodeHead *newFreeNode = NULL;
    struct OsMemNodeHead *nextNode = NULL;

    newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
    newFreeNode->header.ptr.prev = allocNode;
    newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
    allocNode->sizeAndFlag = allocSize;
    nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = &newFreeNode->header;
        if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
            /* Coalesce the remainder with the free neighbour that follows it. */
            OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
            OsMemMergeNode(nextNode);
        }
    }

    OsMemFreeNodeAdd(pool, newFreeNode);
}
838 
/*
 * Finish turning the node at @addr into a used node (owner task id and LMS
 * marking, when those features are compiled in) and return the address of
 * the user data area, which starts right after the node header.
 */
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
    struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;

#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
    /* Record the current task as the owner of this allocation. */
    OsMemNodeSetTaskID(node);
#endif

#ifdef LOSCFG_KERNEL_LMS
    /* Tell the sanitizer this range is now an allocated block. */
    struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
    if (g_lms != NULL) {
        g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    /* User data begins immediately after the used-node header. */
    return node + 1;
}
855 
/*
 * Lay out a fresh pool inside [pool, pool + size): the pool head, one big
 * free node covering the remainder, and the sentinel end node. Returns
 * LOS_OK (the only return value in the visible code).
 */
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(pool, size);
        size = (resize == 0) ? size : resize;
    }
#endif
    (VOID)memset(poolHead, 0, sizeof(struct OsMemPoolHead));

    poolHead->info.pool = pool;
    poolHead->info.totalSize = size;
    /* default attr: lock, not expand. */
    poolHead->info.attr &= ~(OS_MEM_POOL_UNLOCK_ENABLE | OS_MEM_POOL_EXPAND_ENABLE);

    /* One free node spans everything between the pool head and the end node. */
    newNode = OS_MEM_FIRST_NODE(pool);
    newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(newNode);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* The last mem node */
    endNode = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(endNode);
#if OS_MEM_EXPAND_ENABLE
    /* Expandable pool: end node doubles as the sentinel of the region chain. */
    endNode->ptr.next = NULL;
    OsMemSentinelNodeSet(endNode, NULL, 0);
#else
    /* Fixed pool: end node is a zero-size, permanently-used terminator. */
    endNode->sizeAndFlag = 0;
    endNode->ptr.prev = newNode;
    OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
#if (LOSCFG_MEM_WATERLINE == 1)
    /* Initial usage: pool head plus the end node's header. */
    poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif

#ifdef LOSCFG_KERNEL_LMS
    if (resize != 0) {
        OsLmsFirstNodeMark(pool, newNode);
    }
#endif
    return LOS_OK;
}
908 
909 #if (LOSCFG_MEM_MUL_POOL == 1)
/* Wipe the pool head so the pool is no longer recognized as initialized. */
STATIC VOID OsMemPoolDeinit(VOID *pool)
{
    (VOID)memset(pool, 0, sizeof(struct OsMemPoolHead));
}
914 
915 STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
916 {
917     VOID *nextPool = g_poolHead;
918     VOID *curPool = g_poolHead;
919     UINTPTR poolEnd;
920     while (nextPool != NULL) {
921         poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
922         if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
923             (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
924             PRINT_ERR("pool [0x%x, 0x%x) conflict with pool [0x%x, 0x%x)\n", (UINTPTR)pool,
925                       (UINTPTR)pool + size, (UINTPTR)nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
926             return LOS_NOK;
927         }
928         curPool = nextPool;
929         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
930     }
931 
932     if (g_poolHead == NULL) {
933         g_poolHead = pool;
934     } else {
935         ((struct OsMemPoolHead *)curPool)->nextPool = pool;
936     }
937 
938     ((struct OsMemPoolHead *)pool)->nextPool = NULL;
939     return LOS_OK;
940 }
941 
942 STATIC UINT32 OsMemPoolDelete(VOID *pool)
943 {
944     UINT32 ret = LOS_NOK;
945     VOID *nextPool = NULL;
946     VOID *curPool = NULL;
947 
948     do {
949         if (pool == g_poolHead) {
950             g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
951             ret = LOS_OK;
952             break;
953         }
954 
955         curPool = g_poolHead;
956         nextPool = g_poolHead;
957         while (nextPool != NULL) {
958             if (pool == nextPool) {
959                 ((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
960                 ret = LOS_OK;
961                 break;
962             }
963             curPool = nextPool;
964             nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
965         }
966     } while (0);
967 
968     return ret;
969 }
970 #endif
971 
972 UINT32 LOS_MemInit(VOID *pool, UINT32 size)
973 {
974     if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
975         return LOS_NOK;
976     }
977 
978     if (((UINTPTR)pool & (OS_MEM_ALIGN_SIZE - 1)) || \
979         (size & (OS_MEM_ALIGN_SIZE - 1))) {
980         PRINT_ERR("LiteOS heap memory address or size configured not aligned:address:0x%x,size:0x%x, alignsize:%d\n", \
981                   (UINTPTR)pool, size, OS_MEM_ALIGN_SIZE);
982         return LOS_NOK;
983     }
984 
985     if (OsMemPoolInit(pool, size)) {
986         return LOS_NOK;
987     }
988 
989 #if (LOSCFG_MEM_MUL_POOL == 1)
990     if (OsMemPoolAdd(pool, size)) {
991         (VOID)OsMemPoolDeinit(pool);
992         return LOS_NOK;
993     }
994 #endif
995 
996     OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);
997 
998     return LOS_OK;
999 }
1000 
1001 #if (LOSCFG_MEM_MUL_POOL == 1)
1002 UINT32 LOS_MemDeInit(VOID *pool)
1003 {
1004     if (pool == NULL) {
1005         return LOS_NOK;
1006     }
1007 
1008     if (OsMemPoolDelete(pool)) {
1009         return LOS_NOK;
1010     }
1011 
1012     OsMemPoolDeinit(pool);
1013 
1014     OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, pool);
1015 
1016     return LOS_OK;
1017 }
1018 
1019 UINT32 LOS_MemPoolList(VOID)
1020 {
1021     VOID *nextPool = g_poolHead;
1022     UINT32 index = 0;
1023     while (nextPool != NULL) {
1024         PRINTK("pool%u :\n", index);
1025         index++;
1026         OsMemInfoPrint(nextPool);
1027         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1028     }
1029     return index;
1030 }
1031 #endif
1032 
/*
 * Core allocator: take a suitable free node, split off any excess, and mark
 * it used. Called with the pool lock held; @intSave is needed because the
 * lock is dropped around the out-of-memory dump. Returns a pointer to the
 * user data area, or NULL on failure.
 */
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *allocNode = NULL;

#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
        return NULL;
    }
#endif

    /* Total node size: requested bytes plus header, rounded up to alignment. */
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE || (LOSCFG_KERNEL_LMK == 1)
retry:
#endif
    allocNode = OsMemFreeNodeGet(pool, allocSize);
    if (allocNode == NULL) {
#if OS_MEM_EXPAND_ENABLE
        /* Try growing the pool, then retry the allocation. */
        if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
            INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
            if (ret == 0) {
                goto retry;
            }
        }
#endif

#if (LOSCFG_KERNEL_LMK == 1)
        /* Low-memory killer: reclaim memory from tasks, then retry. */
        UINT32 killRet = LOS_LmkTasksKill();
        if (killRet == LOS_OK) {
            goto retry;
        }
#endif
        PRINT_ERR("---------------------------------------------------"
                  "--------------------------------------------------------\n");
        /* Drop the lock while dumping pool state to shorten the critical section. */
        MEM_UNLOCK(pool, intSave);
        OsMemInfoPrint(pool);
        MEM_LOCK(pool, intSave);
        PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
        PRINT_ERR("----------------------------------------------------"
                  "-------------------------------------------------------\n");
        return NULL;
    }

    /* Split off the tail if the leftover is big enough to form a free node. */
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= allocNode->sizeAndFlag) {
        OsMemSplitNode(pool, allocNode, allocSize);
    }

    OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
    OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));

#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(allocNode);
#endif
    return OsMemCreateUsedNode((VOID *)allocNode);
}
1087 
1088 VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
1089 {
1090     if ((pool == NULL) || (size == 0)) {
1091         return NULL;
1092     }
1093 
1094     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1095         size = OS_MEM_MIN_ALLOC_SIZE;
1096     }
1097 
1098     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1099     VOID *ptr = NULL;
1100     UINT32 intSave = 0;
1101 
1102     MEM_LOCK(poolHead, intSave);
1103     do {
1104         if (OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1105             break;
1106         }
1107         ptr = OsMemAlloc(poolHead, size, intSave);
1108     } while (0);
1109     MEM_UNLOCK(poolHead, intSave);
1110 
1111     OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
1112 
1113     return ptr;
1114 }
1115 
/*
 * Allocate @size bytes aligned to @boundary (power of two, multiple of
 * sizeof(VOID *)). Over-allocates and, when the raw pointer is not already
 * aligned, stores the offset ("gap size") in the 4 bytes just before the
 * aligned pointer so LOS_MemFree can recover the real block start.
 */
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
    UINT32 gapSize;

    if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
        !OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    /*
     * sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
     * the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
     * offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
     */
    if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
        /* size + boundary - sizeof(gapSize) would overflow UINT32. */
        return NULL;
    }

    UINT32 useSize = (size + boundary) - sizeof(gapSize);
    if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
        return NULL;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 intSave = 0;
    VOID *ptr = NULL;
    VOID *alignedPtr = NULL;

    MEM_LOCK(poolHead, intSave);
    do {
        ptr = OsMemAlloc(pool, useSize, intSave);
        alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
        /* Already aligned (including the ptr == NULL failure case): done. */
        if (ptr == alignedPtr) {
#ifdef LOSCFG_KERNEL_LMS
            OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
            break;
        }

        /* store gapSize in address (ptr - 4), it will be checked while free */
        gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
        struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
        OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
        OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapSize);
        *(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
#ifdef LOSCFG_KERNEL_LMS
        OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
        ptr = alignedPtr;
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);

    return ptr;
}
1176 
/*
 * Check whether @addr lies inside the usable range of @pool: after the pool
 * head structure and before the pool end (open interval at the end). With
 * pool expansion enabled, expanded regions are also searched.
 */
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
    UINT32 size;

    /* The pool body begins immediately after the pool head (pool + 1). */
    size = pool->info.totalSize;
    if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
        return TRUE;
    }
#if OS_MEM_EXPAND_ENABLE
    /* Walk the chain of expanded regions via their sentinel nodes. */
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
            return TRUE;
        }
    }
#endif
    return FALSE;
}
1199 
1200 STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
1201                                     const struct OsMemNodeHead *endNode,
1202                                     const struct OsMemPoolHead *poolInfo)
1203 {
1204     if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
1205         return FALSE;
1206     }
1207 
1208     if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1209         if (!OS_MEM_MAGIC_VALID(node)) {
1210             return FALSE;
1211         }
1212         return TRUE;
1213     }
1214 
1215     if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
1216         return FALSE;
1217     }
1218 
1219     return TRUE;
1220 }
1221 
/*
 * Validate that @node is a sane, in-use node belonging to @pool before it
 * may be freed: node, its successor, and its predecessor must all pass
 * OsMemIsNodeValid and be linked consistently. With expansion enabled, the
 * search continues into expanded regions when the node is not found in the
 * current one. Returns LOS_OK if the node checks out, LOS_NOK otherwise.
 */
STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
    struct OsMemNodeHead *nextNode = NULL;
    BOOL doneFlag = FALSE;

    do {
        /* Inner do-while(0): any failed check breaks out with doneFlag FALSE. */
        do {
            if (OS_MEM_IS_GAP_NODE(node)) {
                break;
            }

            if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
                break;
            }

            if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
                break;
            }

            nextNode = OS_MEM_NEXT_NODE(node);
            if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
                break;
            }

            /* Successor (unless sentinel/gap) must point back at this node. */
            if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
                if (nextNode->ptr.prev != node) {
                    break;
                }
            }

            /* Predecessor (unless this is the first node) must chain to us. */
            if ((node != startNode) &&
                ((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
                (OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
                break;
            }
            doneFlag = TRUE;
        } while (0);

        if (!doneFlag) {
#if OS_MEM_EXPAND_ENABLE
            /* Not valid in this region: advance to the next expanded region. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                startNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
                continue;
            }
#endif
            return LOS_NOK;
        }
    } while (!doneFlag);

    return LOS_OK;
}
1276 
/*
 * Free the used node @node back into @pool: clear flags, merge with free
 * neighbors on both sides, and (when expansion is on) return a fully-free
 * expanded region to the system. Called with the pool lock held.
 * Returns LOS_OK on success, LOS_NOK if node validation fails.
 */
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
{
    UINT32 ret = OsMemCheckUsedNode(pool, node);
    if (ret != LOS_OK) {
        PRINT_ERR("OsMemFree check error!\n");
        return ret;
    }

#if (LOSCFG_MEM_WATERLINE == 1)
    pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#endif

    /* Strip the used/aligned flag bits, leaving the raw size. */
    node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
#ifdef LOSCFG_KERNEL_LMS
    /* Snapshot node boundaries before merging changes them. */
    struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
    struct OsMemNodeHead *curNodeBackup = node;
    if (g_lms != NULL) {
        g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
    }
#endif
    struct OsMemNodeHead *preNode = node->ptr.prev; /* merge the previous node if it is free */
    if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
        OsMemMergeNode(node);
        node = preNode;
    }

    struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge the next node if it is free */
    if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
        OsMemMergeNode(nextNode);
    }

#if OS_MEM_EXPAND_ENABLE
    if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
        struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
        /* if this is a expand head node, and all unused, free it to pmm */
        if ((node->prev > node) && (node != firstNode)) {
            if (TryShrinkPool(pool, node)) {
                return LOS_OK;
            }
        }
    }
#endif

    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
#ifdef LOSCFG_KERNEL_LMS
    if (g_lms != NULL) {
        g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    return ret;
}
1333 
1334 STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
1335 {
1336     VOID *realPtr = ptr;
1337     UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));
1338 
1339     if (OS_MEM_GAPSIZE_CHECK(gapSize)) {
1340         PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
1341         return NULL;
1342     }
1343 
1344     if (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapSize)) {
1345         gapSize = OS_MEM_GET_ALIGNED_GAPSIZE(gapSize);
1346         if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
1347             (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
1348             PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
1349             return NULL;
1350         }
1351         realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
1352     }
1353     return realPtr;
1354 }
1355 
1356 UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
1357 {
1358     if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
1359         !OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
1360         return LOS_NOK;
1361     }
1362 
1363     OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);
1364 
1365     UINT32 ret = LOS_NOK;
1366     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1367     struct OsMemNodeHead *node = NULL;
1368     UINT32 intSave = 0;
1369 
1370     MEM_LOCK(poolHead, intSave);
1371     do {
1372         ptr = OsGetRealPtr(pool, ptr);
1373         if (ptr == NULL) {
1374             break;
1375         }
1376         node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
1377         ret = OsMemFree(poolHead, node);
1378     } while (0);
1379     MEM_UNLOCK(poolHead, intSave);
1380 
1381     return ret;
1382 }
1383 
/*
 * Shrink the used node @node (raw size @nodeSize) in place to @allocSize,
 * splitting off the excess as a free node when it is large enough.
 * NOTE: the LOSCFG_KERNEL_LMS #ifdef deliberately spans the else branch —
 * the "else" only exists when LMS is compiled in.
 */
STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
{
#if (LOSCFG_MEM_WATERLINE == 1)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
#endif
    /* Temporarily restore the raw size (flags cleared) for the split. */
    node->sizeAndFlag = nodeSize;
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= nodeSize) {
        OsMemSplitNode(pool, node, allocSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        poolInfo->info.curUsedSize -= nodeSize - allocSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1406 
/*
 * Grow the used node @node in place by absorbing the adjacent free node
 * @nextNode, then split off any leftover beyond @allocSize. Caller has
 * verified nodeSize + nextNode size covers allocSize.
 * NOTE: the LOSCFG_KERNEL_LMS #ifdef deliberately spans the else branch.
 */
STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
                                                  UINT32 nodeSize, struct OsMemNodeHead *nextNode)
{
    /* Restore the raw size so the merge arithmetic works on clean values. */
    node->sizeAndFlag = nodeSize;
    OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
    OsMemMergeNode(nextNode);
#ifdef LOSCFG_KERNEL_LMS
    OsLmsReallocMergeNodeMark(node);
#endif
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= node->sizeAndFlag) {
        OsMemSplitNode(pool, node, allocSize);
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
    /* Water-line accounting: only the growth delta is newly used. */
    OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, OS_MEM_NODE_GET_SIZE(node->sizeAndFlag) - nodeSize);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1430 
/*
 * Resize the used node @node (user data at @ptr) to @size bytes.
 * Shrinks in place; grows in place by merging the adjacent free node when
 * that suffices; otherwise allocates a new block, copies the old payload,
 * and frees the old node. Called with the pool lock held (@intSave is the
 * saved state for that lock). Returns the new user pointer or NULL.
 */
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
                struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *nextNode = NULL;
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
    UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    VOID *tmpPtr = NULL;

    /* Current node already big enough: shrink in place. */
    if (nodeSize >= allocSize) {
        OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
        return (VOID *)ptr;
    }

    /* Next node free and together big enough: grow in place. */
    nextNode = OS_MEM_NEXT_NODE(node);
    if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
        ((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
        OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
        return (VOID *)ptr;
    }

    /* Fall back to alloc + copy + free. */
    tmpPtr = OsMemAlloc(pool, size, intSave);
    if (tmpPtr != NULL) {
        if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
            /* LOS_MemFree takes the pool lock itself, so drop it around the call. */
            MEM_UNLOCK(pool, intSave);
            (VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
            MEM_LOCK(pool, intSave);
            return NULL;
        }
        (VOID)OsMemFree(pool, node);
    }
    return tmpPtr;
}
1463 
1464 VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
1465 {
1466     if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1467         return NULL;
1468     }
1469 
1470     OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);
1471 
1472     if (ptr == NULL) {
1473         return LOS_MemAlloc(pool, size);
1474     }
1475 
1476     if (size == 0) {
1477         (VOID)LOS_MemFree(pool, ptr);
1478         return NULL;
1479     }
1480 
1481     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1482         size = OS_MEM_MIN_ALLOC_SIZE;
1483     }
1484 
1485     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1486     struct OsMemNodeHead *node = NULL;
1487     VOID *newPtr = NULL;
1488     UINT32 intSave = 0;
1489 
1490     MEM_LOCK(poolHead, intSave);
1491     do {
1492         ptr = OsGetRealPtr(pool, ptr);
1493         if (ptr == NULL) {
1494             break;
1495         }
1496 
1497         node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
1498         if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
1499             break;
1500         }
1501 
1502         newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
1503     } while (0);
1504     MEM_UNLOCK(poolHead, intSave);
1505 
1506     return newPtr;
1507 }
1508 
1509 #if (LOSCFG_MEM_FREE_BY_TASKID == 1)
1510 STATIC VOID MemNodeFreeByTaskIDHandle(struct OsMemNodeHead *curNode, VOID *arg)
1511 {
1512     UINT32 *args = (UINT32 *)arg;
1513     UINT32 taskID = *args;
1514     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)(UINTPTR)(*(args + 1));
1515     struct OsMemUsedNodeHead *node = NULL;
1516     if (!OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1517         return;
1518     }
1519 
1520     node = (struct OsMemUsedNodeHead *)curNode;
1521     if (node->header.taskID == taskID) {
1522         OsMemFree(poolHead, &node->header);
1523     }
1524     return;
1525 }
1526 
1527 UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
1528 {
1529     UINT32 args[2] = { taskID, (UINT32)(UINTPTR)pool };
1530     if (pool == NULL) {
1531         return LOS_NOK;
1532     }
1533 
1534     if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1535         return LOS_NOK;
1536     }
1537 
1538     OsAllMemNodeDoHandle(pool, MemNodeFreeByTaskIDHandle, (VOID *)args);
1539 
1540     return LOS_OK;
1541 }
1542 #endif
1543 
/*
 * Return the total size of @pool in bytes, including any expanded regions
 * (and excluding inter-region gaps when multi-region support is on).
 * Returns LOS_NOK when @pool is NULL.
 */
UINT32 LOS_MemPoolSizeGet(const VOID *pool)
{
    UINT32 count = 0;

    if (pool == NULL) {
        return LOS_NOK;
    }

    count += ((struct OsMemPoolHead *)pool)->info.totalSize;
#if (LOSCFG_MEM_MUL_REGIONS == 1)
    /* Gap nodes between regions are not usable memory. */
    count -= ((struct OsMemPoolHead *)pool)->info.totalGapSize;
#endif

#if OS_MEM_EXPAND_ENABLE
    /* Add the size of every expanded region reachable via sentinel nodes. */
    UINT32 size;
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);

    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        count += size;
    }
#endif
    return count;
}
1571 
1572 STATIC VOID MemUsedGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1573 {
1574     UINT32 *memUsed = (UINT32 *)arg;
1575     if (OS_MEM_IS_GAP_NODE(curNode)) {
1576         *memUsed += OS_MEM_NODE_HEAD_SIZE;
1577     } else if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1578         *memUsed += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
1579     }
1580     return;
1581 }
1582 
1583 UINT32 LOS_MemTotalUsedGet(VOID *pool)
1584 {
1585     UINT32 memUsed = 0;
1586 
1587     if (pool == NULL) {
1588         return LOS_NOK;
1589     }
1590 
1591     OsAllMemNodeDoHandle(pool, MemUsedGetHandle, (VOID *)&memUsed);
1592 
1593     return memUsed;
1594 }
1595 
/*
 * Report a used node whose magic number is corrupted. The magic field only
 * exists when node integrity checking is compiled in; otherwise this is a
 * no-op that just consumes the argument.
 */
STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
{
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    PRINT_ERR("[%s], %d, memory check error!\n"
              "memory used but magic num wrong, magic num = 0x%x\n",
              __FUNCTION__, __LINE__, (*tmpNode)->magic);
#else
    (VOID)tmpNode;
#endif
}
1606 
1607 STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
1608 {
1609     if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
1610         PRINT_ERR("[%s], %d, memory check error!\n"
1611                   " freeNode.prev: %p is out of legal mem range\n",
1612                   __FUNCTION__, __LINE__, (*tmpNode)->prev);
1613         return LOS_NOK;
1614     }
1615     if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
1616         PRINT_ERR("[%s], %d, memory check error!\n"
1617                   " freeNode.next: %p is out of legal mem range\n",
1618                   __FUNCTION__, __LINE__, (*tmpNode)->next);
1619         return LOS_NOK;
1620     }
1621     return LOS_OK;
1622 }
1623 
1624 STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool)
1625 {
1626     if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
1627         OsMemMagicCheckPrint(tmpNode);
1628         return LOS_NOK;
1629     }
1630 
1631     if (!OsMemAddrValidCheck(pool, (*tmpNode)->ptr.prev)) {
1632         PRINT_ERR("[%s], %d, memory check error!\n"
1633                   " node prev: %p is out of legal mem range\n",
1634                   __FUNCTION__, __LINE__, (*tmpNode)->ptr.next);
1635         return LOS_NOK;
1636     }
1637 
1638     if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* is free node, check free node range */
1639         if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
1640             return LOS_NOK;
1641         }
1642     }
1643 
1644     return LOS_OK;
1645 }
1646 
1647 STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
1648                 const struct OsMemFreeNodeHead *node)
1649 {
1650     if (!OsMemAddrValidCheck(pool, node) ||
1651         ((node->prev != NULL) && !OsMemAddrValidCheck(pool, node->prev)) ||
1652         ((node->next != NULL) && !OsMemAddrValidCheck(pool, node->next)) ||
1653         !OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
1654         return LOS_NOK;
1655     }
1656 
1657     if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
1658         !OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
1659         !OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
1660         !OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
1661         return LOS_NOK;
1662     }
1663 
1664     return LOS_OK;
1665 }
1666 
/*
 * Validate the pool head and every node on every free list, printing a
 * diagnostic for each broken entry plus a pool summary when anything
 * was found broken.
 */
STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
{
    struct OsMemFreeNodeHead *tmpNode = NULL;
    UINT32 index;
    UINT32 flag = 0;

    /* The head must point back at itself and be pointer-aligned. */
    if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
        PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
        return;
    }

    /* Walk every bucket of the segregated free lists. */
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
            if (OsMemFreeListNodeCheck(pool, tmpNode)) {
                flag = 1;
                PRINT_ERR("FreeListIndex: %u, node: %p, bNode: %p, prev:%p, next: %p\n",
                          index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
            }
        }
    }

    if (flag) {
        /* Something was broken: dump a summary of the pool state. */
        PRINTK("mem pool info: poolAddr: %p, poolSize: 0x%x\n", pool, pool->info.totalSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
               pool->info.curUsedSize);
#endif
#if OS_MEM_EXPAND_ENABLE
        /* Also list each expanded region reachable from the sentinel chain. */
        UINT32 size;
        struct OsMemNodeHead *node = NULL;
        struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
        while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
            size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
            node = OsMemSentinelNodeGet(sentinel);
            sentinel = OS_MEM_END_NODE(node, size);
            PRINTK("expand node info: nodeAddr: 0x%x, nodeSize: 0x%x\n", node, size);
        }
#endif
    }
}
1707 
/*
 * Full pool walk: check the pool head, then every node in the main region
 * and (with expansion enabled) every expanded region. On failure, *tmpNode
 * is the broken node and *preNode the last node that passed, so the caller
 * can report context. Returns LOS_OK or LOS_NOK.
 */
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
                struct OsMemNodeHead **preNode)
{
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    OsMemPoolHeadCheck(pool);

    *preNode = OS_MEM_FIRST_NODE(pool);
    do {
        /* Walk node-by-node up to the current region's end node. */
        for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
            if (OS_MEM_IS_GAP_NODE(*tmpNode)) {
                continue;
            }
            if (OsMemIntegrityCheckSub(tmpNode, pool) == LOS_NOK) {
                return LOS_NOK;
            }
            *preNode = *tmpNode;
        }
#if OS_MEM_EXPAND_ENABLE
        /* Jump to the next expanded region via the sentinel, if any. */
        if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
            *preNode = OsMemSentinelNodeGet(*tmpNode);
            endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
        } else
#endif
        {
            break;
        }
    } while (1);
    return LOS_OK;
}
1738 
1739 #if (LOSCFG_KERNEL_PRINTF != 0)
/*
 * Dump the raw header fields of a broken node and of its predecessor so the
 * corruption can be diagnosed from the console log.
 *
 * Used-node dump:  prev-pointer [, magic], sizeAndFlag.
 * Free-node dump:  prev-pointer, next, prev [, magic], sizeAndFlag.
 * The magic column only exists when node integrity checking is compiled in,
 * hence the interleaved #if blocks inside the PRINTK argument lists.
 */
STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
                          const struct OsMemNodeHead *preNode)
{
    struct OsMemUsedNodeHead *usedNode = NULL;
    struct OsMemFreeNodeHead *freeNode = NULL;

    if (tmpNode == preNode) {
        PRINTK("\n the broken node is the first node\n");
    }

    if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x, ",
            usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            usedNode->header.magic,
#endif
            usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x, ",
            freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            freeNode->header.magic,
#endif
            freeNode->header.sizeAndFlag);
    }

    if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)preNode;
        PRINTK("prev node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x\n",
            usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            usedNode->header.magic,
#endif
            usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)preNode;
        PRINTK("prev node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x, ",
            freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            freeNode->header.magic,
#endif
            freeNode->header.sizeAndFlag);
    }

#if (LOSCFG_MEM_LEAKCHECK == 1)
    /* Also dump the allocation backtraces recorded for both nodes. */
    OsMemNodeBacktraceInfo(tmpNode, preNode);
#endif
}
1806 #endif
1807 
/*
 * Snapshot of the first corrupted node and its predecessor, copied into a
 * global so the data survives for post-mortem inspection after a panic.
 */
struct OsMemIntegrityCheckInfo {
    struct OsMemNodeHead preNode; /* copy of the last node that passed the check */
    struct OsMemNodeHead errNode; /* copy of the corrupted node */
};

struct OsMemIntegrityCheckInfo g_integrityCheckRecord = {0};
1814 
1815 STATIC INLINE VOID OsMemCheckInfoRecord(const struct OsMemNodeHead *errNode,
1816                                      const struct OsMemNodeHead *preNode)
1817 {
1818     (VOID)memcpy(&g_integrityCheckRecord.preNode, preNode, sizeof(struct OsMemNodeHead));
1819     (VOID)memcpy(&g_integrityCheckRecord.errNode, errNode, sizeof(struct OsMemNodeHead));
1820 }
1821 
1822 STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
1823                                      const struct OsMemNodeHead *tmpNode,
1824                                      const struct OsMemNodeHead *preNode,
1825                                      UINT32 intSave)
1826 {
1827 #if (LOSCFG_KERNEL_PRINTF != 0)
1828     OsMemNodeInfo(tmpNode, preNode);
1829 #endif
1830     OsMemCheckInfoRecord(tmpNode, preNode);
1831 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
1832     LosTaskCB *taskCB = NULL;
1833     if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1834         struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
1835         UINT32 taskID = usedNode->header.taskID;
1836         if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1837             MEM_UNLOCK(pool, intSave);
1838             LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
1839         }
1840 
1841         taskCB = OS_TCB_FROM_TID(taskID);
1842         if ((taskCB->taskStatus & OS_TASK_STATUS_UNUSED) || (taskCB->taskEntry == NULL)) {
1843             MEM_UNLOCK(pool, intSave);
1844             LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
1845         }
1846     } else {
1847         PRINTK("The prev node is free\n");
1848     }
1849     MEM_UNLOCK(pool, intSave);
1850     PRINT_ERR("cur node: 0x%x, pre node: 0x%x, pre node was allocated by task: %d, %s\n",
1851               (unsigned int)tmpNode, (unsigned int)preNode, taskCB->taskID, taskCB->taskName);
1852     LOS_Panic("Memory integrity check error!\n");
1853 #else
1854     MEM_UNLOCK(pool, intSave);
1855     LOS_Panic("Memory integrity check error, cur node: 0x%x, pre node: 0x%x\n", tmpNode, preNode);
1856 #endif
1857 }
1858 
1859 #if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
1860 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
1861 {
1862     struct OsMemNodeHead *tmpNode = NULL;
1863     struct OsMemNodeHead *preNode = NULL;
1864 
1865     if (OsMemIntegrityCheck(pool, &tmpNode, &preNode)) {
1866         OsMemIntegrityCheckError(pool, tmpNode, preNode, intSave);
1867         return LOS_NOK;
1868     }
1869     return LOS_OK;
1870 }
1871 #endif
1872 
1873 UINT32 LOS_MemIntegrityCheck(const VOID *pool)
1874 {
1875     if (pool == NULL) {
1876         return LOS_NOK;
1877     }
1878 
1879     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1880     struct OsMemNodeHead *tmpNode = NULL;
1881     struct OsMemNodeHead *preNode = NULL;
1882     UINT32 intSave = 0;
1883 
1884     MEM_LOCK(poolHead, intSave);
1885     if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
1886         goto ERROR_OUT;
1887     }
1888     MEM_UNLOCK(poolHead, intSave);
1889     return LOS_OK;
1890 
1891 ERROR_OUT:
1892     OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
1893     return LOS_NOK;
1894 }
1895 
1896 STATIC INLINE VOID OsMemInfoGet(struct OsMemNodeHead *node,
1897                 LOS_MEM_POOL_STATUS *poolStatus)
1898 {
1899     UINT32 totalUsedSize = 0;
1900     UINT32 totalFreeSize = 0;
1901     UINT32 usedNodeNum = 0;
1902     UINT32 freeNodeNum = 0;
1903     UINT32 maxFreeSize = 0;
1904     UINT32 size;
1905 
1906     if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1907         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1908         ++freeNodeNum;
1909         totalFreeSize += size;
1910         if (maxFreeSize < size) {
1911             maxFreeSize = size;
1912         }
1913     } else {
1914         if (OS_MEM_IS_GAP_NODE(node)) {
1915             size = OS_MEM_NODE_HEAD_SIZE;
1916         } else {
1917             size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1918         }
1919         ++usedNodeNum;
1920         totalUsedSize += size;
1921     }
1922 
1923     poolStatus->totalUsedSize += totalUsedSize;
1924     poolStatus->totalFreeSize += totalFreeSize;
1925     poolStatus->maxFreeNodeSize = poolStatus->maxFreeNodeSize > maxFreeSize ?
1926                                   poolStatus->maxFreeNodeSize : maxFreeSize;
1927     poolStatus->usedNodeNum += usedNodeNum;
1928     poolStatus->freeNodeNum += freeNodeNum;
1929 }
1930 
1931 STATIC VOID OsMemNodeInfoGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1932 {
1933     LOS_MEM_POOL_STATUS *poolStatus = (LOS_MEM_POOL_STATUS *)arg;
1934     OsMemInfoGet(curNode, poolStatus);
1935     return;
1936 }
1937 
1938 UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
1939 {
1940     struct OsMemPoolHead *poolInfo = pool;
1941     UINT32 intSave = 0;
1942 
1943     if (poolStatus == NULL) {
1944         PRINT_ERR("can't use NULL addr to save info\n");
1945         return LOS_NOK;
1946     }
1947 
1948     if ((pool == NULL) || (poolInfo->info.pool != pool)) {
1949         PRINT_ERR("wrong mem pool addr: 0x%x, line:%d\n", (UINTPTR)poolInfo, __LINE__);
1950         return LOS_NOK;
1951     }
1952 
1953     (VOID)memset(poolStatus, 0, sizeof(LOS_MEM_POOL_STATUS));
1954 
1955     OsAllMemNodeDoHandle(pool, OsMemNodeInfoGetHandle, (VOID *)poolStatus);
1956 
1957     MEM_LOCK(poolInfo, intSave);
1958 #if (LOSCFG_MEM_WATERLINE == 1)
1959     poolStatus->usageWaterLine = poolInfo->info.waterLine;
1960 #endif
1961     MEM_UNLOCK(poolInfo, intSave);
1962 
1963     return LOS_OK;
1964 }
1965 
/*
 * Print a one-row usage summary table for the pool (address, size, used/free
 * bytes, largest free node, node counts, and the waterline when compiled in).
 * Compiles to a no-op when kernel printing is disabled.
 */
STATIC VOID OsMemInfoPrint(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    LOS_MEM_POOL_STATUS status = {0};

    if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
        return;
    }

#if (LOSCFG_MEM_WATERLINE == 1)
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num      UsageWaterLine\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------      ------------\n");
    PRINTK("%-16p   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x    0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum, status.usageWaterLine);
#else
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------\n");
    PRINTK("%-16p  0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum);
#endif
#endif
}
1997 
/*
 * Public API: print, per free-list bucket, how many free nodes the pool holds
 * and the size (or size range) that bucket serves. Counting is done under the
 * pool lock; printing happens afterwards from the snapshot in countNum[].
 * Returns LOS_NOK for an invalid pool address, LOS_OK otherwise (always
 * LOS_OK when kernel printing is compiled out).
 */
UINT32 LOS_MemFreeNodeShow(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;

    if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
        PRINT_ERR("wrong mem pool addr: 0x%x, line: %d\n", (UINTPTR)poolInfo, __LINE__);
        return LOS_NOK;
    }

    struct OsMemFreeNodeHead *node = NULL;
    UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
    UINT32 index;
    UINT32 intSave = 0;

    MEM_LOCK(poolInfo, intSave);
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        node = poolInfo->freeList[index];
        while (node) {
            node = node->next;
            countNum[index]++;
        }
    }
    MEM_UNLOCK(poolInfo, intSave);

    PRINTK("\n   ************************ left free node number**********************\n");
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        if (countNum[index] == 0) {
            continue;
        }

        PRINTK("free index: %03u, ", index);
        if (index < OS_MEM_SMALL_BUCKET_COUNT) {
            /* Small buckets serve one exact size each, in 4-byte steps (<< 2). */
            PRINTK("size: [0x%x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: setup is 4. */
        } else {
            /* Large buckets: power-of-two first level split into (1 << OS_MEM_SLI)
             * sub-ranges — presumably TLSF-style two-level indexing; the printed
             * [low, high] is the size range the bucket serves. */
            UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
            UINT32 offset = val >> OS_MEM_SLI;
            PRINTK("size: [0x%x, 0x%x], num: %u\n",
                   (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
                   ((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
                   countNum[index]);
        }
    }
    PRINTK("\n   ********************************************************************\n\n");
#endif
    return LOS_OK;
}
2045 
2046 VOID LOS_MemUnlockEnable(VOID *pool)
2047 {
2048     if (pool == NULL) {
2049         return;
2050     }
2051 
2052     ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_UNLOCK_ENABLE;
2053 }
2054 
2055 #if (LOSCFG_MEM_MUL_REGIONS == 1)
2056 STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion * const memRegions,
2057                                                 UINT32 memRegionCount)
2058 {
2059     const LosMemRegion *memRegion = NULL;
2060     VOID *lastStartAddress = NULL;
2061     VOID *curStartAddress = NULL;
2062     UINT32 lastLength;
2063     UINT32 curLength;
2064     UINT32 regionCount;
2065 
2066     if ((pool != NULL) && (((struct OsMemPoolHead *)pool)->info.pool != pool)) {
2067         PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
2068         return LOS_NOK;
2069     }
2070 
2071     if (pool != NULL) {
2072         lastStartAddress = pool;
2073         lastLength = ((struct OsMemPoolHead *)pool)->info.totalSize;
2074     }
2075 
2076     memRegion = memRegions;
2077     regionCount = 0;
2078     while (regionCount < memRegionCount) {
2079         curStartAddress = memRegion->startAddress;
2080         curLength = memRegion->length;
2081         if ((curStartAddress == NULL) || (curLength == 0)) {
2082             PRINT_ERR("Memory address or length configured wrongly:address:0x%x, the length:0x%x\n",
2083                       (UINTPTR)curStartAddress, curLength);
2084             return LOS_NOK;
2085         }
2086         if (((UINTPTR)curStartAddress & (OS_MEM_ALIGN_SIZE - 1)) || (curLength & (OS_MEM_ALIGN_SIZE - 1))) {
2087             PRINT_ERR("Memory address or length configured not aligned:address:0x%x, the length:0x%x, alignsize:%d\n",
2088                       (UINTPTR)curStartAddress, curLength, OS_MEM_ALIGN_SIZE);
2089             return LOS_NOK;
2090         }
2091         if ((lastStartAddress != NULL) && (((UINT8 *)lastStartAddress + lastLength) >= (UINT8 *)curStartAddress)) {
2092             PRINT_ERR("Memory regions overlapped, the last start address:0x%x, "
2093                       "the length:0x%x, the current start address:0x%x\n",
2094                       (UINTPTR)lastStartAddress, lastLength, (UINTPTR)curStartAddress);
2095             return LOS_NOK;
2096         }
2097         memRegion++;
2098         regionCount++;
2099         lastStartAddress = curStartAddress;
2100         lastLength = curLength;
2101     }
2102     return LOS_OK;
2103 }
2104 
/*
 * Append one discontiguous memory region to an existing pool.
 *
 * The address gap between the pool's current end and the new region's start
 * is covered by converting the pool's old end node into a "gap node": a used,
 * magic-marked node whose size spans the hole, so node traversal steps
 * straight over it. The new region then becomes one free node plus a fresh
 * end node, and the pool's accounting is enlarged accordingly.
 *
 * @param poolHead         Pool being extended.
 * @param lastStartAddress Start of the previous region (unused here; kept for
 *                         the caller's symmetric signature).
 * @param lastLength       Length of the previous region (unused here).
 * @param lastEndNode      The pool's current end node, repurposed as gap node.
 * @param memRegion        The region to link in.
 */
STATIC INLINE VOID OsMemMulRegionsLink(struct OsMemPoolHead *poolHead, VOID *lastStartAddress, UINT32 lastLength,
                                       struct OsMemNodeHead *lastEndNode, const LosMemRegion *memRegion)
{
    UINT32 curLength;
    UINT32 gapSize;
    struct OsMemNodeHead *curEndNode = NULL;
    struct OsMemNodeHead *curFreeNode = NULL;
    VOID *curStartAddress = NULL;

    curStartAddress = memRegion->startAddress;
    curLength = memRegion->length;
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(curStartAddress, curLength);
        curLength = (resize == 0) ? curLength : resize;
    }
#endif
    // mark the gap between two regions as one used node
    gapSize = (UINT8 *)(curStartAddress) - ((UINT8 *)(poolHead) + poolHead->info.totalSize);
    lastEndNode->sizeAndFlag = gapSize + OS_MEM_NODE_HEAD_SIZE;
    OS_MEM_SET_MAGIC(lastEndNode);
    OS_MEM_NODE_SET_USED_FLAG(lastEndNode->sizeAndFlag);

    // mark the gap node with magic number
    OS_MEM_MARK_GAP_NODE(lastEndNode);

    poolHead->info.totalSize += (curLength + gapSize);
    poolHead->info.totalGapSize += gapSize;

    // the whole new region becomes a single free node, linked back to the gap node
    curFreeNode = (struct OsMemNodeHead *)curStartAddress;
    curFreeNode->sizeAndFlag = curLength - OS_MEM_NODE_HEAD_SIZE;
    curFreeNode->ptr.prev = lastEndNode;
    OS_MEM_SET_MAGIC(curFreeNode);
    OsMemFreeNodeAdd(poolHead, (struct OsMemFreeNodeHead *)curFreeNode);

    // fresh end node terminating the enlarged pool
    curEndNode = OS_MEM_END_NODE(curStartAddress, curLength);
    curEndNode->sizeAndFlag = 0;
    curEndNode->ptr.prev = curFreeNode;
    OS_MEM_SET_MAGIC(curEndNode);
    OS_MEM_NODE_SET_USED_FLAG(curEndNode->sizeAndFlag);

#if (LOSCFG_MEM_WATERLINE == 1)
    /* The new end node's header is permanently consumed; account for it. */
    poolHead->info.curUsedSize += OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif
}
2156 
/*
 * Public API: build or extend a memory pool from multiple discontiguous
 * memory regions.
 *
 * @param pool           Existing pool to extend, or NULL to create a new pool
 *                       from the first region in memRegions.
 * @param memRegions     Array of regions, required to be in ascending address
 *                       order and non-overlapping (validated first).
 * @param memRegionCount Number of entries in memRegions.
 * @return LOS_OK on success, or the failing check/init error code.
 */
UINT32 LOS_MemRegionsAdd(VOID *pool, const LosMemRegion *const memRegions, UINT32 memRegionCount)
{
    UINT32 ret;
    UINT32 lastLength;
    UINT32 curLength;
    UINT32 regionCount;
    struct OsMemPoolHead *poolHead = NULL;
    struct OsMemNodeHead *lastEndNode = NULL;
    struct OsMemNodeHead *firstFreeNode = NULL;
    const LosMemRegion *memRegion = NULL;
    VOID *lastStartAddress = NULL;
    VOID *curStartAddress = NULL;

    ret = OsMemMulRegionsParamCheck(pool, memRegions, memRegionCount);
    if (ret != LOS_OK) {
        return ret;
    }

    memRegion = memRegions;
    regionCount = 0;
    if (pool != NULL) { // add the memory regions to the specified memory pool
        poolHead = (struct OsMemPoolHead *)pool;
        lastStartAddress = pool;
        lastLength = poolHead->info.totalSize;
    } else { // initialize the memory pool with the first memory region
        lastStartAddress = memRegion->startAddress;
        lastLength = memRegion->length;
        poolHead = (struct OsMemPoolHead *)lastStartAddress;
        ret = LOS_MemInit(lastStartAddress, lastLength);
        if (ret != LOS_OK) {
            return ret;
        }
        memRegion++;
        regionCount++;
    }

    firstFreeNode = OS_MEM_FIRST_NODE(lastStartAddress);
    lastEndNode = OS_MEM_END_NODE(lastStartAddress, poolHead->info.totalSize);
    /* traverse the rest memory regions, and initialize them as free nodes and link together */
    while (regionCount < memRegionCount) {
        curStartAddress = memRegion->startAddress;
        curLength = memRegion->length;

        OsMemMulRegionsLink(poolHead, lastStartAddress, lastLength, lastEndNode, memRegion);
        lastStartAddress = curStartAddress;
        lastLength = curLength;
        /* totalSize grew inside OsMemMulRegionsLink; recompute the end node. */
        lastEndNode = OS_MEM_END_NODE(poolHead, poolHead->info.totalSize);
        memRegion++;
        regionCount++;
    }

    /* close the node chain: the first free node's prev points at the final end node */
    firstFreeNode->ptr.prev = lastEndNode;
    return ret;
}
2211 #endif
2212 
2213 UINT32 OsMemSystemInit(VOID)
2214 {
2215     UINT32 ret;
2216 
2217 #if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
2218     m_aucSysMem0 = g_memStart;
2219 #else
2220     m_aucSysMem0 = LOSCFG_SYS_HEAP_ADDR;
2221 #endif
2222 
2223     ret = LOS_MemInit(m_aucSysMem0, LOSCFG_SYS_HEAP_SIZE);
2224     PRINT_INFO("LiteOS heap memory address:%p, size:0x%lx\n", m_aucSysMem0, (unsigned long int)LOSCFG_SYS_HEAP_SIZE);
2225     return ret;
2226 }
2227 
2228 #if (LOSCFG_PLATFORM_EXC == 1)
/*
 * Collect exception-dump information for one pool: pool address, total size,
 * free bytes, and node count, scanning every node on the way. The scan stops
 * at the first node that fails validation and records that node's address,
 * payload length, and (when ownership tracking is enabled) owning task.
 * Runs entirely under the pool lock.
 */
STATIC VOID OsMemExcInfoGetSub(struct OsMemPoolHead *pool, MemInfoCB *memExcInfo)
{
    struct OsMemNodeHead *tmpNode = NULL;
    UINT32 taskID = OS_TASK_ERRORID;
    UINT32 intSave = 0;

    (VOID)memset(memExcInfo, 0, sizeof(MemInfoCB));

    MEM_LOCK(pool, intSave);
    memExcInfo->type = MEM_MANG_MEMORY;
    memExcInfo->startAddr = (UINTPTR)pool->info.pool;
    memExcInfo->size = pool->info.totalSize;
    memExcInfo->free = pool->info.totalSize - pool->info.curUsedSize;

    struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    for (tmpNode = firstNode; tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        memExcInfo->blockSize++;
        if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
            /* used node: magic and back-pointer must both be sane */
            if (!OS_MEM_MAGIC_VALID(tmpNode) ||
                !OsMemAddrValidCheck(pool, tmpNode->ptr.prev)) {
#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
                taskID = ((struct OsMemUsedNodeHead *)tmpNode)->header.taskID;
#endif
                goto ERROUT;
            }
        } else { /* is free node, check free node range */
            struct OsMemFreeNodeHead *freeNode = (struct OsMemFreeNodeHead *)tmpNode;
            /* non-zero return indicates an invalid free node — judging by this usage */
            if (OsMemAddrValidCheckPrint(pool, &freeNode)) {
                goto ERROUT;
            }
        }
    }
    MEM_UNLOCK(pool, intSave);
    return;

ERROUT:
    /* record the payload address/length of the corrupted node and its owner */
    memExcInfo->errorAddr = (UINTPTR)((CHAR *)tmpNode + OS_MEM_NODE_HEAD_SIZE);
    memExcInfo->errorLen = OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag) - OS_MEM_NODE_HEAD_SIZE;
    memExcInfo->errorOwner = taskID;
    MEM_UNLOCK(pool, intSave);
    return;
}
2273 
2274 UINT32 OsMemExcInfoGet(UINT32 memNumMax, MemInfoCB *memExcInfo)
2275 {
2276     UINT8 *buffer = (UINT8 *)memExcInfo;
2277     UINT32 count = 0;
2278 
2279 #if (LOSCFG_MEM_MUL_POOL == 1)
2280     struct OsMemPoolHead *memPool = g_poolHead;
2281     while (memPool != NULL) {
2282         OsMemExcInfoGetSub(memPool, (MemInfoCB *)buffer);
2283         count++;
2284         buffer += sizeof(MemInfoCB);
2285         if (count >= memNumMax) {
2286             break;
2287         }
2288         memPool = memPool->nextPool;
2289     }
2290 #else
2291     OsMemExcInfoGetSub(m_aucSysMem0, buffer);
2292     count++;
2293 #endif
2294 
2295     return count;
2296 }
2297 #endif
2298