• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3  * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without modification,
6  * are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice, this list of
9  *    conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12  *    of conditions and the following disclaimer in the documentation and/or other materials
13  *    provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16  *    to endorse or promote products derived from this software without specific prior written
17  *    permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "los_memory.h"
33 #include "securec.h"
34 #include "los_arch.h"
35 #include "los_config.h"
36 #include "los_debug.h"
37 #include "los_hook.h"
38 #include "los_interrupt.h"
39 #include "los_task.h"
40 #ifdef LOSCFG_KERNEL_LMS
41 #include "los_lms_pri.h"
42 #endif
43 #if (LOSCFG_KERNEL_LMK == 1)
44 #include "los_lmk.h"
45 #endif
46 
47 /* Used to cut non-essential functions. */
48 #define OS_MEM_EXPAND_ENABLE    0
49 
50 UINT8 *m_aucSysMem0 = NULL;
51 
52 #if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
53 STATIC UINT8 g_memStart[LOSCFG_SYS_HEAP_SIZE];
54 #endif
55 
56 #if (LOSCFG_MEM_MUL_POOL == 1)
57 VOID *g_poolHead = NULL;
58 #endif
59 
60 /* The following is the macro definition and interface implementation related to the TLSF. */
61 
62 #define OS_MEM_BITMAP_MASK 0x1FU
63 
64 /* Used to find the first bit of 1 in bitmap. */
OsMemFFS(UINT32 bitmap)65 STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
66 {
67     bitmap &= ~bitmap + 1;
68     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
69 }
70 
71 /* Used to find the last bit of 1 in bitmap. */
OsMemFLS(UINT32 bitmap)72 STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
73 {
74     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
75 }
76 
OsMemLog2(UINT32 size)77 STATIC INLINE UINT32 OsMemLog2(UINT32 size)
78 {
79     return (size > 0) ? OsMemFLS(size) : 0;
80 }
81 
82 /* Get the first level: f = log2(size). */
OsMemFlGet(UINT32 size)83 STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
84 {
85     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
86         return ((size >> 2) - 1); /* 2: The small bucket setup is 4. */
87     }
88     return (OsMemLog2(size) - OS_MEM_LARGE_START_BUCKET + OS_MEM_SMALL_BUCKET_COUNT);
89 }
90 
91 /* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
OsMemSlGet(UINT32 size,UINT32 fl)92 STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
93 {
94     if ((fl < OS_MEM_SMALL_BUCKET_COUNT) || (size < OS_MEM_SMALL_BUCKET_MAX_SIZE)) {
95         PRINT_ERR("fl or size is too small, fl = %u, size = %u\n", fl, size);
96         return 0;
97     }
98 
99     UINT32 sl = (size << OS_MEM_SLI) >> (fl - OS_MEM_SMALL_BUCKET_COUNT + OS_MEM_LARGE_START_BUCKET);
100     return (sl - (1 << OS_MEM_SLI));
101 }
102 
103 /* The following is the memory algorithm related macro definition and interface implementation. */
104 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1 && (LOSCFG_BASE_CORE_TSK_LIMIT + 1) > 64)
105 #error "When enter here, LOSCFG_BASE_CORE_TSK_LIMIT larger than 63 is not support"
106 #endif
107 
/* Header placed in front of every allocated block; the user payload follows it directly. */
struct OsMemUsedNodeHead {
    struct OsMemNodeHead header;
};
111 
112 /* The memory pool support expand. */
113 #define OS_MEM_POOL_EXPAND_ENABLE   0x01
114 /* The memory pool support no lock. */
115 #define OS_MEM_POOL_UNLOCK_ENABLE   0x02
116 
/*
 * Disable interrupts around pool operations unless the pool was created with
 * OS_MEM_POOL_UNLOCK_ENABLE. Note: no trailing semicolon after while (0) —
 * the do-while(0) idiom exists so the caller's own ';' terminates the
 * statement, keeping these macros safe inside if/else bodies.
 */
#define MEM_LOCK(pool, state)       do {                    \
    if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
        (state) = LOS_IntLock();                            \
    }                                                       \
} while (0)
#define MEM_UNLOCK(pool, state)     do {                    \
    if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
        LOS_IntRestore(state);                              \
    }                                                       \
} while (0)
127 
128 #define OS_MEM_NODE_MAGIC          0xABCDDCBA
129 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1)
130 #define OS_MEM_NODE_USED_FLAG      (1U << 25)
131 #define OS_MEM_NODE_ALIGNED_FLAG   (1U << 24)
132 #if (LOSCFG_MEM_LEAKCHECK == 1)
133 #define OS_MEM_NODE_LEAK_FLAG      (1U << 23)
134 #else
135 #define OS_MEM_NODE_LEAK_FLAG      0
136 #endif
137 #if (OS_MEM_EXPAND_ENABLE == 1)
138 #define OS_MEM_NODE_LAST_FLAG      (1U << 22)  /* Sentinel Node */
139 #else
140 #define OS_MEM_NODE_LAST_FLAG      0
141 #endif
142 #else
143 #define OS_MEM_NODE_USED_FLAG      (1U << 31)
144 #define OS_MEM_NODE_ALIGNED_FLAG   (1U << 30)
145 #if (LOSCFG_MEM_LEAKCHECK == 1)
146 #define OS_MEM_NODE_LEAK_FLAG      (1U << 29)
147 #else
148 #define OS_MEM_NODE_LEAK_FLAG      0
149 #endif
150 #if (OS_MEM_EXPAND_ENABLE == 1)
151 #define OS_MEM_NODE_LAST_FLAG      (1U << 28)  /* Sentinel Node */
152 #else
153 #define OS_MEM_NODE_LAST_FLAG      0
154 #endif
155 #endif
156 
157 #define OS_MEM_NODE_ALIGNED_AND_USED_FLAG \
158     (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LEAK_FLAG | OS_MEM_NODE_LAST_FLAG)
159 
160 #define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
161             ((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
162 #define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
163             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG)
164 #define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
165             ((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
166 #define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
167             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG)
168 #define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
169             ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
170 
171 #define OS_MEM_GAPSIZE_USED_FLAG      0x80000000U
172 #define OS_MEM_GAPSIZE_ALIGNED_FLAG   0x40000000U
173 #define OS_MEM_GET_ALIGNED_GAPSIZE(gapsize) \
174             ((gapsize) & ~OS_MEM_GAPSIZE_ALIGNED_FLAG)
175 #define OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) \
176                 ((gapsize) & OS_MEM_GAPSIZE_ALIGNED_FLAG)
177 #define OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapsize) \
178                 (gapsize) = ((gapsize) | OS_MEM_GAPSIZE_ALIGNED_FLAG)
179 #define OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize) \
180                 ((gapsize) & OS_MEM_GAPSIZE_USED_FLAG)
181 #define OS_MEM_GAPSIZE_CHECK(gapsize) \
182                 (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) && \
183                  OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize))
184 
185 #define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
186             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG)
187 #define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
188             ((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)
189 #define OS_MEM_NODE_GET_LEAK_FLAG(sizeAndFlag) \
190             ((sizeAndFlag) & OS_MEM_NODE_LEAK_FLAG)
191 #define OS_MEM_NODE_SET_LEAK_FLAG(sizeAndFlag) \
192             (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LEAK_FLAG)
193 
194 #define OS_MEM_ALIGN_SIZE           sizeof(UINTPTR)
195 #define OS_MEM_IS_POW_TWO(value)    ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
196 #define OS_MEM_ALIGN(p, alignSize)  (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
197 #define OS_MEM_IS_ALIGNED(a, b)     (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
198 #define OS_MEM_NODE_HEAD_SIZE       sizeof(struct OsMemUsedNodeHead)
199 #define OS_MEM_MIN_POOL_SIZE        (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
200 #define OS_MEM_MIN_LEFT_SIZE        sizeof(struct OsMemFreeNodeHead)
201 #define OS_MEM_MIN_ALLOC_SIZE       8
202 #define OS_MEM_NEXT_NODE(node) \
203     ((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
204 #define OS_MEM_FIRST_NODE(pool) \
205     (struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
206 #define OS_MEM_END_NODE(pool, size) \
207     (struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
208 #define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
209     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
210 #define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
211     (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
212 #if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
213 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
214 #define OS_MEM_SET_MAGIC(node)      ((node)->magic = OS_MEM_NODE_MAGIC)
215 #define OS_MEM_MAGIC_VALID(node)    ((node)->magic == OS_MEM_NODE_MAGIC)
216 #else
217 #define OS_MEM_SET_MAGIC(node)
218 #define OS_MEM_MAGIC_VALID(node)    TRUE
219 #endif
220 
221 #if (LOSCFG_MEM_MUL_REGIONS == 1)
222 /**
223  *  When LOSCFG_MEM_MUL_REGIONS is enabled to support multiple non-continuous memory regions,
224  *  the gap between two memory regions is marked as a used OsMemNodeHead node. The gap node
225  *  couldn't be freed, and would also be skipped in some DFX functions. The 'ptr.prev' pointer
226  *  of this node is set to OS_MEM_GAP_NODE_MAGIC to identify that this is a gap node.
227 */
228 #define OS_MEM_GAP_NODE_MAGIC       0xDCBAABCD
229 #define OS_MEM_MARK_GAP_NODE(node)  \
230     (((struct OsMemNodeHead *)(node))->ptr.prev = (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
231 #define OS_MEM_IS_GAP_NODE(node)    \
232     (((struct OsMemNodeHead *)(node))->ptr.prev == (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
233 #else
234 #define OS_MEM_MARK_GAP_NODE(node)
235 #define OS_MEM_IS_GAP_NODE(node)    FALSE
236 #endif
237 
238 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
239 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
240 STATIC VOID OsMemInfoPrint(VOID *pool);
241 
242 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
/* Stamp the current task's ID into the node header (owner accounting / free-by-task). */
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
    node->header.taskID = LOS_CurTaskIDGet();
}
247 #endif
/*
 * Apply 'handle(curNode, arg)' to every node in 'pool' under the pool lock.
 * The walk is preceded by a full integrity check and returns early on a NULL
 * pool or a corrupted pool. The end/sentinel node itself is never passed to
 * the handler; when pool expansion is compiled in, the walk follows sentinel
 * links into each expanded region.
 */
STATIC VOID OsAllMemNodeDoHandle(VOID *pool, VOID (*handle)(struct OsMemNodeHead *curNode, VOID *arg), VOID *arg)
{
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 intSave = 0;

    if (pool == NULL) {
        PRINTK("input param is NULL\n");
        return;
    }
    if (LOS_MemIntegrityCheck(pool)) {
        PRINTK("LOS_MemIntegrityCheck error\n");
        return;
    }

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        if (tmpNode == endNode) {
#if OS_MEM_EXPAND_ENABLE
            UINT32 size;
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                /* Not the final sentinel: hop into the expanded region it points to. */
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            }
#endif
            break;
        }
        handle(tmpNode, arg);
    }
    MEM_UNLOCK(poolInfo, intSave);
}
283 
284 #if (LOSCFG_TASK_MEM_USED == 1)
/*
 * Per-node callback for OsTaskMemUsed: for every in-use node, add its size to
 * the owning task's slot in the caller-supplied buffer.
 * 'arg' packs two UINT32s: [0] = buffer address, [1] = buffer capacity.
 */
STATIC VOID GetTaskMemUsedHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
    UINT32 *args = (UINT32 *)arg;
    UINT32 *tskMemInfoBuf = (UINT32 *)(UINTPTR)*args;
    UINT32 tskMemInfoCnt = *(args + 1);
/* NOTE(review): '#ifndef' guard style differs from the '#if (x == 1)' used elsewhere in this file — confirm intended. */
#ifndef LOSCFG_MEM_MUL_REGIONS
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
#else
    /* Skip gap nodes: they only mark holes between non-contiguous memory regions. */
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(curNode)) {
#endif
        /* Ignore task IDs beyond the buffer capacity rather than overrunning it. */
        if (curNode->taskID < tskMemInfoCnt) {
            tskMemInfoBuf[curNode->taskID] += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
        }
    }
    return;
}
301 
302 VOID OsTaskMemUsed(VOID *pool, UINT32 *tskMemInfoBuf, UINT32 tskMemInfoCnt)
303 {
304     UINT32 args[2] = {(UINT32)(UINTPTR)tskMemInfoBuf, tskMemInfoCnt};
305     OsAllMemNodeDoHandle(pool, GetTaskMemUsedHandle, (VOID *)args);
306     return;
307 }
308 #endif
309 
310 #if (LOSCFG_MEM_WATERLINE == 1)
311 STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
312 {
313     pool->info.curUsedSize += size;
314     if (pool->info.curUsedSize > pool->info.waterLine) {
315         pool->info.waterLine = pool->info.curUsedSize;
316     }
317 }
318 #else
319 STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
320 {
321     (VOID)pool;
322     (VOID)size;
323 }
324 #endif
325 
326 #if OS_MEM_EXPAND_ENABLE
/* Follow the sentinel chain of expanded regions and return the final sentinel node. */
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
    struct OsMemNodeHead *node = NULL;
    VOID *ptr = sentinelNode->ptr.next;
    UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);

    /* The terminal sentinel has a NULL next pointer or records a zero region size. */
    while ((ptr != NULL) && (size != 0)) {
        node = OS_MEM_END_NODE(ptr, size);
        ptr = node->ptr.next;
        size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    }

    return node;
}
341 
342 STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
343 {
344     if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
345         return FALSE;
346     }
347 
348     if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
349         return FALSE;
350     }
351 
352     return TRUE;
353 }
354 
355 STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
356 {
357     if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
358         PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
359         return TRUE;
360     }
361 
362     if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
363         (sentinelNode->ptr.next == NULL)) {
364         return TRUE;
365     }
366 
367     return FALSE;
368 }
369 
/*
 * Make 'sentinelNode' (or the last sentinel in its chain) point at a new
 * expanded region 'newNode' of 'size' bytes, marking the sentinel used+last.
 */
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
    if (sentinelNode->ptr.next != NULL) {
        /* Already linked to a region: append at the end of the chain instead. */
        sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
    }

    sentinelNode->sizeAndFlag = size;
    sentinelNode->ptr.next = newNode;
    OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
    OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
}
381 
382 STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
383 {
384     if (OsMemSentinelNodeCheck(node) == FALSE) {
385         return NULL;
386     }
387 
388     return node->ptr.next;
389 }
390 
/*
 * Walk the sentinel chain and return the sentinel whose linked region starts
 * at 'node'; NULL (with an error print) if 'node' is not found in any region.
 */
STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
{
    UINT32 nextSize;
    struct OsMemNodeHead *nextNode = NULL;
    struct OsMemNodeHead *sentinelNode = NULL;

    /* Start from the base pool's end node, then hop region to region. */
    sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
    while (sentinelNode != NULL) {
        if (OsMemIsLastSentinelNode(sentinelNode)) {
            PRINT_ERR("PreSentinelNodeGet can not find node 0x%x\n", node);
            return NULL;
        }
        nextNode = OsMemSentinelNodeGet(sentinelNode);
        if (nextNode == node) {
            return sentinelNode;
        }
        nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
        sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
    }

    return NULL;
}
413 
/*
 * Try to return an entirely-free expanded region to the system. 'node' must
 * be the region's only (free) node; node->ptr.prev is the region's own
 * sentinel. Succeeds only when the free node spans the whole region; the
 * previous sentinel is then relinked past the freed region.
 */
STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *mySentinel = NULL;
    struct OsMemNodeHead *preSentinel = NULL;
    /* Distance from the node to its own sentinel == the region's usable size. */
    size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
    size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);

    if (nodeSize != totalSize) {
        return FALSE;
    }

    preSentinel = PreSentinelNodeGet(pool, node);
    if (preSentinel == NULL) {
        return FALSE;
    }

    mySentinel = node->ptr.prev;
    if (OsMemIsLastSentinelNode(mySentinel)) { /* prev node becomes sentinel node */
        preSentinel->ptr.next = NULL;
        OsMemSentinelNodeSet(preSentinel, NULL, 0);
    } else {
        /* Splice this region out of the chain: the previous sentinel inherits our links. */
        preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
        preSentinel->ptr.next = mySentinel->ptr.next;
    }

    if (OsMemLargeNodeFree(node) != LOS_OK) {
        PRINT_ERR("TryShrinkPool free 0x%x failed!\n", node);
        return FALSE;
    }

    return TRUE;
}
446 
/*
 * Grow the pool by allocating a fresh contiguous physical region of at least
 * 'size' bytes (rounded up to whole pages, header included) and chaining it
 * onto the sentinel list as one big free node plus its own sentinel.
 * On allocation failure the pool lock is dropped while OsTryShrinkMemory
 * reclaims memory, then the allocation is retried up to
 * MAX_SHRINK_PAGECACHE_TRY times. Returns 0 on success, -1 on failure.
 */
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 size, UINT32 intSave)
{
    UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);

RETRY:
    newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
    if (newNode == NULL) {
        if (tryCount > 0) {
            tryCount--;
            /* Drop the lock while reclaiming so others can make progress. */
            MEM_UNLOCK(poolInfo, intSave);
            OsTryShrinkMemory(size >> PAGE_SHIFT);
            MEM_LOCK(poolInfo, intSave);
            goto RETRY;
        }

        PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
        return -1;
    }
    /* Lay out the region: one free node whose prev points at its own end/sentinel node. */
    newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
    OsMemSentinelNodeSet(endNode, newNode, size);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    endNode = OS_MEM_END_NODE(newNode, size);
    (VOID)memset_s(endNode, sizeof(*endNode), 0, sizeof(*endNode));
    endNode->ptr.next = NULL;
    OS_MEM_SET_MAGIC(endNode);
    OsMemSentinelNodeSet(endNode, NULL, 0);
    /* The new sentinel's header counts as used space. */
    OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);

    return 0;
}
485 
486 VOID LOS_MemExpandEnable(VOID *pool)
487 {
488     if (pool == NULL) {
489         return;
490     }
491 
492     ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
493 }
494 #endif
495 
496 #ifdef LOSCFG_KERNEL_LMS
/*
 * Paint LMS shadow memory for a pool's first node: the pool header as PAINT,
 * this node's header and the next node's header as REDZONE, and the node's
 * payload as AFTERFREE (it starts out free).
 */
STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
    g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_AFTERFREE_U8);
}
510 
/*
 * Shadow-mark an aligned allocation: the gap between the raw block start and
 * the aligned pointer becomes PAINT (the stored gap-size word) plus REDZONE,
 * and everything past the requested 'size' up to the next node is REDZONE.
 */
STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
{
    struct OsMemNodeHead *allocNode = NULL;

    if ((g_lms == NULL) || (ptr == NULL)) {
        return;
    }
    /* Step back over the used-node header to reach the node itself. */
    allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
    if (ptr != alignedPtr) {
        g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
        g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
    }

    /* mark remaining as redzone */
    g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
        LMS_SHADOW_REDZONE_U8);
}
528 
/* After a realloc merge, re-mark the node's whole payload span as accessible. */
STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_ACCESSIBLE_U8);
}
538 
/* After a realloc split, mark the split-off node's header as REDZONE and its payload as freed. */
STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark next node */
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
        (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        (UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
}
550 
/* After an in-place realloc shrink to 'resize' bytes, mark the tail of the node as REDZONE. */
STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark remaining as redzone */
    g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
}
559 #endif
560 
561 #if (LOSCFG_MEM_LEAKCHECK == 1)
/* One leak-check record: the node plus the call stack captured when it was allocated. */
struct OsMemLeakCheckInfo {
    struct OsMemNodeHead *node;
    UINTPTR linkReg[LOSCFG_MEM_RECORD_LR_CNT];
};

/* Ring buffer of recorded nodes and its next write position. */
struct OsMemLeakCheckInfo g_leakCheckRecord[LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM] = {0};
STATIC UINT32 g_leakCheckRecordCnt = 0;
569 
/*
 * Record 'node' into the leak-check ring buffer at most once: the LEAK flag
 * marks nodes already recorded, and the write cursor wraps at the buffer end.
 */
STATIC INLINE VOID OsMemLeakCheckInfoRecord(struct OsMemNodeHead *node)
{
    struct OsMemLeakCheckInfo *info = &g_leakCheckRecord[g_leakCheckRecordCnt];

    if (!OS_MEM_NODE_GET_LEAK_FLAG(node->sizeAndFlag)) {
        info->node = node;
        (VOID)memcpy(info->linkReg, node->linkReg, sizeof(node->linkReg));
        OS_MEM_NODE_SET_LEAK_FLAG(node->sizeAndFlag);
        g_leakCheckRecordCnt++;
        if (g_leakCheckRecordCnt >= LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM) {
            g_leakCheckRecordCnt = 0;
        }
    }
}
584 
585 STATIC INLINE VOID OsMemLeakCheckInit(VOID)
586 {
587     (VOID)memset_s(g_leakCheckRecord, sizeof(struct OsMemLeakCheckInfo) * LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM,
588                    0, sizeof(struct OsMemLeakCheckInfo) * LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM);
589     g_leakCheckRecordCnt = 0;
590 }
591 
592 STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
593 {
594     (VOID)memset(node->linkReg, 0, sizeof(node->linkReg));
595     OsBackTraceHookCall(node->linkReg, LOSCFG_MEM_RECORD_LR_CNT, LOSCFG_MEM_OMIT_LR_CNT, 0);
596 }
597 
/* Print one in-use node (address, size, recorded LRs) and feed it to the leak-check recorder. */
STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
{
    UINT32 count;

    if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(node)) {
        PRINTK("0x%x: 0x%x ", (UINTPTR)node, OS_MEM_NODE_GET_SIZE(node->sizeAndFlag));
        for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
            PRINTK(" 0x%x ", node->linkReg[count]);
        }
        PRINTK("\n");

        OsMemLeakCheckInfoRecord(node);
    }
}
612 
613 STATIC VOID OsMemUsedNodePrintHandle(struct OsMemNodeHead *node, VOID *arg)
614 {
615     UNUSED(arg);
616     OsMemUsedNodePrint(node);
617     return;
618 }
619 
620 VOID LOS_MemUsedNodeShow(VOID *pool)
621 {
622     UINT32 count;
623 
624     PRINTK("\n\rnode          size    ");
625     for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
626         PRINTK("    LR[%u]   ", count);
627     }
628     PRINTK("\n");
629 
630     OsMemLeakCheckInit();
631     OsAllMemNodeDoHandle(pool, OsMemUsedNodePrintHandle, NULL);
632     return;
633 }
634 
635 #if (LOSCFG_KERNEL_PRINTF != 0)
636 STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
637                                    const struct OsMemNodeHead *preNode)
638 {
639     int i;
640     PRINTK("\n broken node head LR info: \n");
641     for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
642         PRINTK(" LR[%d]:0x%x\n", i, tmpNode->linkReg[i]);
643     }
644 
645     PRINTK("\n pre node head LR info: \n");
646     for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
647         PRINTK(" LR[%d]:0x%x\n", i, preNode->linkReg[i]);
648     }
649 }
650 #endif
651 #endif
652 
653 STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
654 {
655     UINT32 fl = OsMemFlGet(size);
656     if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
657         return fl;
658     }
659 
660     UINT32 sl = OsMemSlGet(size, fl);
661     return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl);
662 }
663 
664 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
665                                         UINT32 index, UINT32 size)
666 {
667     struct OsMemFreeNodeHead *node = NULL;
668 
669     for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
670         if (node->header.sizeAndFlag >= size) {
671             return node;
672         }
673     }
674 
675     return NULL;
676 }
677 
678 STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
679 {
680     /* 5: Divide by 32 to calculate the index of the bitmap array. */
681     UINT32 mask = poolHead->freeListBitmap[index >> 5];
682     mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
683     if (mask != 0) {
684         index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
685         return index;
686     }
687 
688     return OS_MEM_FREE_LIST_COUNT;
689 }
690 
/*
 * TLSF good-fit search: find a free list guaranteed to hold a block of at
 * least 'size' bytes. First probe lists strictly above the exact (fl, sl)
 * bucket via the bitmap; failing that, fall back to a first-fit scan of the
 * exact bucket. On success '*outIndex' receives the chosen list index.
 */
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 fl = OsMemFlGet(size);
    UINT32 sl;
    UINT32 index, tmp;
    UINT32 curIndex = OS_MEM_FREE_LIST_COUNT;
    UINT32 mask;

    do {
        if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
            index = fl;
        } else {
            sl = OsMemSlGet(size, fl);
            curIndex = ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
            /* Start above the exact bucket: higher lists only hold big-enough blocks. */
            index = curIndex + 1;
        }

        tmp = OsMemNotEmptyIndexGet(poolHead, index);
        if (tmp != OS_MEM_FREE_LIST_COUNT) {
            index = tmp;
            goto DONE;
        }

        /* Scan the remaining bitmap words, one 32-list word at a time. */
        for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) {
            /* 5: Divide by 32 to calculate the index of the bitmap array. */
            mask = poolHead->freeListBitmap[index >> 5];
            if (mask != 0) {
                index = OsMemFFS(mask) + index;
                goto DONE;
            }
        }
    } while (0);

    if (curIndex == OS_MEM_FREE_LIST_COUNT) {
        return NULL;
    }

    /* No larger list available: first-fit within the exact-size bucket. */
    *outIndex = curIndex;
    return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
DONE:
    *outIndex = index;
    return poolHead->freeList[index];
}
735 
736 STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
737 {
738     /* 5: Divide by 32 to calculate the index of the bitmap array. */
739     head->freeListBitmap[index >> 5] |= 1U << (index & 0x1f);
740 }
741 
742 STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
743 {
744     /* 5: Divide by 32 to calculate the index of the bitmap array. */
745     head->freeListBitmap[index >> 5] &= ~(1U << (index & 0x1f));
746 }
747 
748 STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
749 {
750     struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
751     if (firstNode != NULL) {
752         firstNode->prev = node;
753     }
754     node->prev = NULL;
755     node->next = firstNode;
756     pool->freeList[listIndex] = node;
757     OsMemSetFreeListBit(pool, listIndex);
758     OS_MEM_SET_MAGIC(&node->header);
759 }
760 
/*
 * Unlink 'node' from free list 'listIndex'; clears the bitmap bit when the
 * list becomes empty and restamps the node magic afterwards.
 */
STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
    if (node == pool->freeList[listIndex]) {
        /* Removing the head: advance the list, possibly emptying it. */
        pool->freeList[listIndex] = node->next;
        if (node->next == NULL) {
            OsMemClearFreeListBit(pool, listIndex);
        } else {
            node->next->prev = NULL;
        }
    } else {
        node->prev->next = node->next;
        if (node->next != NULL) {
            node->next->prev = node->prev;
        }
    }
    OS_MEM_SET_MAGIC(&node->header);
}
778 
779 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
780 {
781     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
782     if (index >= OS_MEM_FREE_LIST_COUNT) {
783         LOS_Panic("The index of free lists is error, index = %u\n", index);
784     }
785     OsMemListAdd(pool, index, node);
786 }
787 
788 STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
789 {
790     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
791     OsMemListDelete(pool, index, node);
792 }
793 
794 STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
795 {
796     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
797     UINT32 index;
798     struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
799     if (firstNode == NULL) {
800         return NULL;
801     }
802 
803     OsMemListDelete(poolHead, index, firstNode);
804 
805     return &firstNode->header;
806 }
807 
/*
 * Merge 'node' into its physically preceding neighbor by folding its size
 * into node->ptr.prev, then fix the back-pointer of the node that follows
 * the merged span (unless that node is a sentinel or a region gap marker).
 */
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *nextNode = NULL;

    node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
    nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = node->ptr.prev;
    }
}
818 
/*
 * Split 'allocNode' at 'allocSize': the remainder becomes a new free node.
 * If the node after the remainder is itself free, it is merged into the
 * remainder first; the remainder is then inserted into the free lists.
 */
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
    struct OsMemFreeNodeHead *newFreeNode = NULL;
    struct OsMemNodeHead *nextNode = NULL;

    newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
    newFreeNode->header.ptr.prev = allocNode;
    newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
    allocNode->sizeAndFlag = allocSize;
    nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = &newFreeNode->header;
        if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
            /* Coalesce with the following free node to limit fragmentation. */
            OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
            OsMemMergeNode(nextNode);
        }
    }

    OsMemFreeNodeAdd(pool, newFreeNode);
}
839 
/*
 * Finalize an allocated node and return its payload address (just past the
 * node header). When configured, records the owning task in the header and
 * notifies the LMS sanitizer that the payload is now addressable.
 */
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
    struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;

#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
    OsMemNodeSetTaskID(node);
#endif

#ifdef LOSCFG_KERNEL_LMS
    struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
    if (g_lms != NULL) {
        g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    /* Payload begins right after the used-node header. */
    return node + 1;
}
856 
/*
 * Initialize a memory pool: zero the pool header, create one free node
 * covering the whole payload area, and set up the tail sentinel node.
 * Always returns LOS_OK in this implementation.
 */
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    (VOID)memset_s(poolHead, size, 0, sizeof(struct OsMemPoolHead));

#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(pool, size);
        size = (resize == 0) ? size : resize;
    }
#endif

    poolHead->info.pool = pool;
    poolHead->info.totalSize = size;
    /* default attr: lock, not expand. */
    poolHead->info.attr &= ~(OS_MEM_POOL_UNLOCK_ENABLE | OS_MEM_POOL_EXPAND_ENABLE);

    /* First node spans everything between the pool header and the sentinel. */
    newNode = OS_MEM_FIRST_NODE(pool);
    newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(newNode);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* The last mem node */
    endNode = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(endNode);
#if OS_MEM_EXPAND_ENABLE
    /* With expansion enabled, the tail node is a sentinel that can later
     * link to expansion regions. */
    endNode->ptr.next = NULL;
    OsMemSentinelNodeSet(endNode, NULL, 0);
#else
    endNode->sizeAndFlag = 0;
    endNode->ptr.prev = newNode;
    OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
#if (LOSCFG_MEM_WATERLINE == 1)
    /* Pool header + sentinel are permanent overhead, counted as "used". */
    poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif

#ifdef LOSCFG_KERNEL_LMS
    if (resize != 0) {
        OsLmsFirstNodeMark(pool, newNode);
    }
#endif
    return LOS_OK;
}
911 
912 #if (LOSCFG_MEM_MUL_POOL == 1)
/*
 * Tear down a pool created by OsMemPoolInit: release the LMS shadow memory
 * when configured, then wipe the pool header so the region is no longer
 * recognized as a pool.
 */
STATIC VOID OsMemPoolDeInit(VOID *pool, UINT32 size)
{
#ifdef LOSCFG_KERNEL_LMS
    if (g_lms != NULL) {
        g_lms->deInit(pool);
    }
#endif
    (VOID)memset_s(pool, size, 0, sizeof(struct OsMemPoolHead));
}
922 
923 STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
924 {
925     VOID *nextPool = g_poolHead;
926     VOID *curPool = g_poolHead;
927     UINTPTR poolEnd;
928     while (nextPool != NULL) {
929         poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
930         if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
931             (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
932             PRINT_ERR("pool [0x%x, 0x%x) conflict with pool [0x%x, 0x%x)\n", (UINTPTR)pool,
933                       (UINTPTR)pool + size, (UINTPTR)nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
934             return LOS_NOK;
935         }
936         curPool = nextPool;
937         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
938     }
939 
940     if (g_poolHead == NULL) {
941         g_poolHead = pool;
942     } else {
943         ((struct OsMemPoolHead *)curPool)->nextPool = pool;
944     }
945 
946     ((struct OsMemPoolHead *)pool)->nextPool = NULL;
947     return LOS_OK;
948 }
949 
950 STATIC UINT32 OsMemPoolDelete(VOID *pool)
951 {
952     UINT32 ret = LOS_NOK;
953     VOID *nextPool = NULL;
954     VOID *curPool = NULL;
955 
956     do {
957         if (pool == g_poolHead) {
958             g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
959             ret = LOS_OK;
960             break;
961         }
962 
963         curPool = g_poolHead;
964         nextPool = g_poolHead;
965         while (nextPool != NULL) {
966             if (pool == nextPool) {
967                 ((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
968                 ret = LOS_OK;
969                 break;
970             }
971             curPool = nextPool;
972             nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
973         }
974     } while (0);
975 
976     return ret;
977 }
978 #endif
979 
/*
 * Public entry: initialize 'pool' of 'size' bytes as a LiteOS memory pool.
 * Both the base address and the size must be OS_MEM_ALIGN_SIZE aligned and
 * the size must exceed OS_MEM_MIN_POOL_SIZE. With multi-pool support the
 * pool is also registered in the global pool list (and rolled back if that
 * registration fails). Returns LOS_OK on success, LOS_NOK otherwise.
 */
UINT32 LOS_MemInit(VOID *pool, UINT32 size)
{
    if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
        return LOS_NOK;
    }

    if (((UINTPTR)pool & (OS_MEM_ALIGN_SIZE - 1)) || \
        (size & (OS_MEM_ALIGN_SIZE - 1))) {
        PRINT_ERR("LiteOS heap memory address or size configured not aligned:address:0x%x,size:0x%x, alignsize:%d\n", \
                  (UINTPTR)pool, size, OS_MEM_ALIGN_SIZE);
        return LOS_NOK;
    }

    if (OsMemPoolInit(pool, size)) {
        return LOS_NOK;
    }

#if (LOSCFG_MEM_MUL_POOL == 1)
    /* Undo the pool header setup if the pool overlaps an existing one. */
    if (OsMemPoolAdd(pool, size)) {
        (VOID)OsMemPoolDeInit(pool, size);
        return LOS_NOK;
    }
#endif

    OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);

    return LOS_OK;
}
1008 
1009 #if (LOSCFG_MEM_MUL_POOL == 1)
/*
 * Public entry: deinitialize a pool previously set up by LOS_MemInit.
 * Validates that the address really is a pool header (self-pointer and
 * minimum size), unregisters it from the global list, then wipes the header.
 * Returns LOS_OK on success, LOS_NOK on validation or unlink failure.
 */
UINT32 LOS_MemDeInit(VOID *pool)
{
    struct OsMemPoolHead *tmpPool = (struct OsMemPoolHead *)pool;

    if (tmpPool == NULL) {
        return LOS_NOK;
    }

    /* A valid pool header stores its own address in info.pool. */
    if ((tmpPool->info.pool != pool) || (tmpPool->info.totalSize <= OS_MEM_MIN_POOL_SIZE)) {
        return LOS_NOK;
    }

    if (OsMemPoolDelete(tmpPool)) {
        return LOS_NOK;
    }

    OsMemPoolDeInit(tmpPool, tmpPool->info.totalSize);

    OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, tmpPool);

    return LOS_OK;
}
1032 
1033 UINT32 LOS_MemPoolList(VOID)
1034 {
1035     VOID *nextPool = g_poolHead;
1036     UINT32 index = 0;
1037     while (nextPool != NULL) {
1038         PRINTK("pool%u :\n", index);
1039         index++;
1040         OsMemInfoPrint(nextPool);
1041         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1042     }
1043     return index;
1044 }
1045 #endif
1046 
/*
 * Core allocator, called with the pool lock held. Rounds the request up to
 * an aligned node size, takes a block from the free lists, splits off any
 * usable remainder, and marks the node used. On exhaustion it may expand
 * the pool or invoke the low-memory killer (when configured) and retry.
 * Returns the payload pointer, or NULL on failure.
 */
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *allocNode = NULL;

#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
        return NULL;
    }
#endif

    /* Total node size = payload + header, rounded to the pool alignment. */
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE || (LOSCFG_KERNEL_LMK == 1)
retry:
#endif
    allocNode = OsMemFreeNodeGet(pool, allocSize);
    if (allocNode == NULL) {
#if OS_MEM_EXPAND_ENABLE
        if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
            INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
            if (ret == 0) {
                goto retry;
            }
        }
#endif

#if (LOSCFG_KERNEL_LMK == 1)
        /* Last resort: kill low-priority tasks to reclaim memory. */
        UINT32 killRet = LOS_LmkTasksKill();
        if (killRet == LOS_OK) {
            goto retry;
        }
#endif
        PRINT_ERR("---------------------------------------------------"
                  "--------------------------------------------------------\n");
        /* Drop the lock while printing pool statistics, then re-take it. */
        MEM_UNLOCK(pool, intSave);
        OsMemInfoPrint(pool);
        MEM_LOCK(pool, intSave);
        PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
        PRINT_ERR("----------------------------------------------------"
                  "-------------------------------------------------------\n");
        return NULL;
    }

    /* Split off the tail only if the leftover is big enough to be a node. */
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= allocNode->sizeAndFlag) {
        OsMemSplitNode(pool, allocNode, allocSize);
    }

    OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
    OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));

#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(allocNode);
#endif
    return OsMemCreateUsedNode((VOID *)allocNode);
}
1101 
1102 VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
1103 {
1104     if ((pool == NULL) || (size == 0)) {
1105         return NULL;
1106     }
1107 
1108     if (size < OS_MEM_MIN_ALLOC_SIZE) {
1109         size = OS_MEM_MIN_ALLOC_SIZE;
1110     }
1111 
1112     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1113     VOID *ptr = NULL;
1114     UINT32 intSave = 0;
1115 
1116     MEM_LOCK(poolHead, intSave);
1117     do {
1118         if (OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1119             break;
1120         }
1121         ptr = OsMemAlloc(poolHead, size, intSave);
1122     } while (0);
1123     MEM_UNLOCK(poolHead, intSave);
1124 
1125     OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
1126 
1127     return ptr;
1128 }
1129 
/*
 * Public entry: allocate 'size' bytes aligned to 'boundary' (a power of two,
 * itself pointer-aligned). Over-allocates by (boundary - sizeof(gapSize)),
 * then, if the natural pointer is not already aligned, stores the offset
 * ("gap size", tagged with the aligned flag) in the 4 bytes just before the
 * aligned pointer so LOS_MemFree can recover the real node.
 * Returns the aligned payload pointer, or NULL on failure.
 */
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
    UINT32 gapSize;

    if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
        !OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    /*
     * sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
     * the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
     * offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
     */
    if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
        return NULL;
    }

    UINT32 useSize = (size + boundary) - sizeof(gapSize);
    if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
        return NULL;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 intSave = 0;
    VOID *ptr = NULL;
    VOID *alignedPtr = NULL;

    MEM_LOCK(poolHead, intSave);
    do {
        /* NULL from OsMemAlloc aligns to NULL, so the equality branch below
         * also covers allocation failure. */
        ptr = OsMemAlloc(pool, useSize, intSave);
        alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
        if (ptr == alignedPtr) {
#ifdef LOSCFG_KERNEL_LMS
            OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
            break;
        }

        /* store gapSize in address (ptr - 4), it will be checked while free */
        gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
        struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
        OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
        OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapSize);
        *(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
#ifdef LOSCFG_KERNEL_LMS
        OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
        ptr = alignedPtr;
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);

    return ptr;
}
1190 
/*
 * Check whether 'addr' lies inside the pool's payload area (strictly after
 * the pool header, before the pool end). With expansion enabled, the chain
 * of expansion regions reachable through the sentinel nodes is searched too.
 */
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
    UINT32 size;

    size = pool->info.totalSize;
    if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
        return TRUE;
    }
#if OS_MEM_EXPAND_ENABLE
    /* Walk each expansion region via its sentinel and test the same way. */
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
            return TRUE;
        }
    }
#endif
    return FALSE;
}
1213 
1214 STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
1215                                     const struct OsMemNodeHead *endNode,
1216                                     const struct OsMemPoolHead *poolInfo)
1217 {
1218     if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
1219         return FALSE;
1220     }
1221 
1222     if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1223         if (!OS_MEM_MAGIC_VALID(node)) {
1224             return FALSE;
1225         }
1226         return TRUE;
1227     }
1228 
1229     if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
1230         return FALSE;
1231     }
1232 
1233     return TRUE;
1234 }
1235 
/*
 * Verify that 'node' is a well-formed, in-use node of 'pool' before freeing
 * it: it must be inside a pool region, marked used, consistent with its
 * physical neighbors' back pointers, and not a gap node. With expansion
 * enabled, each expansion region is tried in turn (the inner loop breaks
 * without setting doneFlag, and the outer loop hops to the next region).
 * Returns LOS_OK when the node passes all checks, LOS_NOK otherwise.
 */
STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
    struct OsMemNodeHead *nextNode = NULL;
    BOOL doneFlag = FALSE;

    do {
        do {
            /* Gap nodes are internal padding and must never be freed. */
            if (OS_MEM_IS_GAP_NODE(node)) {
                break;
            }

            if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
                break;
            }

            if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
                break;
            }

            nextNode = OS_MEM_NEXT_NODE(node);
            if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
                break;
            }

            /* The next node's back pointer must point at this node. */
            if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
                if (nextNode->ptr.prev != node) {
                    break;
                }
            }

            /* The previous node, when present, must link forward to us. */
            if ((node != startNode) &&
                ((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
                (OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
                break;
            }
            doneFlag = TRUE;
        } while (0);

        if (!doneFlag) {
#if OS_MEM_EXPAND_ENABLE
            /* Not found in this region: move on to the next expansion region. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                startNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
                continue;
            }
#endif
            return LOS_NOK;
        }
    } while (!doneFlag);

    return LOS_OK;
}
1290 
/*
 * Core free routine, called with the pool lock held. Validates the node,
 * clears its flag bits, coalesces it with free physical neighbors, and puts
 * the result back on a free list (or returns an entirely-unused expansion
 * region to the system when shrinking is possible).
 * Returns LOS_OK on success, or the failure code from the validity check.
 */
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
{
    UINT32 ret = OsMemCheckUsedNode(pool, node);
    if (ret != LOS_OK) {
        PRINT_ERR("OsMemFree check error!\n");
        return ret;
    }

#if (LOSCFG_MEM_WATERLINE == 1)
    pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#endif

    /* Strip the used/aligned flag bits, leaving the pure size. */
    node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
#ifdef LOSCFG_KERNEL_LMS
    /* Remember the original node span: merging below may change 'node'. */
    struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
    struct OsMemNodeHead *curNodeBackup = node;
    if (g_lms != NULL) {
        g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
    }
#endif
    struct OsMemNodeHead *preNode = node->ptr.prev; /* merge with the previous node */
    if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
        OsMemMergeNode(node);
        node = preNode;
    }

    struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge with the next node */
    if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
        OsMemMergeNode(nextNode);
    }

#if OS_MEM_EXPAND_ENABLE
    if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
        struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
        /* if this is an expand head node and it is entirely unused, return it to pmm */
        if ((node->prev > node) && (node != firstNode)) {
            if (TryShrinkPool(pool, node)) {
                return LOS_OK;
            }
        }
    }
#endif

    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
#ifdef LOSCFG_KERNEL_LMS
    if (g_lms != NULL) {
        g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    return ret;
}
1347 
/*
 * Recover the real allocation pointer from a user pointer that may have
 * come from LOS_MemAllocAlign. The 4 bytes before 'ptr' hold the tagged
 * gap size written at allocation time; when the aligned flag is present,
 * the gap is validated (alignment and in-pool range) and subtracted.
 * Returns the real pointer, or NULL when the stored gap size is corrupt.
 */
STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
{
    VOID *realPtr = ptr;
    UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));

    /* A value carrying both used and aligned markers is invalid. */
    if (OS_MEM_GAPSIZE_CHECK(gapSize)) {
        PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
        return NULL;
    }

    if (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapSize)) {
        gapSize = OS_MEM_GET_ALIGNED_GAPSIZE(gapSize);
        /* Gap must be pool-aligned and must not point before the pool. */
        if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
            (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
            PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
            return NULL;
        }
        realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
    }
    return realPtr;
}
1369 
1370 UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
1371 {
1372     if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
1373         !OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
1374         return LOS_NOK;
1375     }
1376 
1377     OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);
1378 
1379     UINT32 ret = LOS_NOK;
1380     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1381     struct OsMemNodeHead *node = NULL;
1382     UINT32 intSave = 0;
1383 
1384     MEM_LOCK(poolHead, intSave);
1385     do {
1386         ptr = OsGetRealPtr(pool, ptr);
1387         if (ptr == NULL) {
1388             break;
1389         }
1390         node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
1391         ret = OsMemFree(poolHead, node);
1392     } while (0);
1393     MEM_UNLOCK(poolHead, intSave);
1394 
1395     return ret;
1396 }
1397 
/*
 * In-place shrink for realloc: the node already holds enough space, so keep
 * the same payload and split off the surplus when it is large enough to
 * form a free node. NOTE: the LOSCFG_KERNEL_LMS #ifdef deliberately
 * interleaves with the if/else — without LMS the 'else' branch disappears
 * entirely, so only the split call remains conditional.
 */
STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
{
#if (LOSCFG_MEM_WATERLINE == 1)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
#endif
    /* Temporarily restore the raw size; the used flag is re-set below. */
    node->sizeAndFlag = nodeSize;
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= nodeSize) {
        OsMemSplitNode(pool, node, allocSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        poolInfo->info.curUsedSize -= nodeSize - allocSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1420 
/*
 * In-place grow for realloc: absorb the free next node into 'node', then
 * split off any surplus beyond allocSize. The caller has verified that
 * nodeSize + nextNode size covers allocSize. NOTE: as in
 * OsMemReAllocSmaller, the LMS #ifdef interleaves with the if/else so the
 * 'else' branch only exists when LMS is configured.
 */
STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
                                                  UINT32 nodeSize, struct OsMemNodeHead *nextNode)
{
    /* Restore the raw size so the merge arithmetic is flag-free. */
    node->sizeAndFlag = nodeSize;
    OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
    OsMemMergeNode(nextNode);
#ifdef LOSCFG_KERNEL_LMS
    OsLmsReallocMergeNodeMark(node);
#endif
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= node->sizeAndFlag) {
        OsMemSplitNode(pool, node, allocSize);
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
    /* Account only for the growth relative to the original node size. */
    OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, OS_MEM_NODE_GET_SIZE(node->sizeAndFlag) - nodeSize);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1444 
/*
 * Core realloc, called with the pool lock held. Strategy, in order:
 * shrink in place; grow in place by merging a free next node; otherwise
 * allocate a new block, copy the old payload, and free the old node.
 * Returns the (possibly unchanged) payload pointer, or NULL on failure
 * (the original allocation is left intact in that case).
 */
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
                struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *nextNode = NULL;
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
    UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    VOID *tmpPtr = NULL;

    if (nodeSize >= allocSize) {
        OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
        return (VOID *)ptr;
    }

    nextNode = OS_MEM_NEXT_NODE(node);
    if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
        ((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
        OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
        return (VOID *)ptr;
    }

    tmpPtr = OsMemAlloc(pool, size, intSave);
    if (tmpPtr != NULL) {
        if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
            /* LOS_MemFree takes the pool lock itself, so drop it first. */
            MEM_UNLOCK(pool, intSave);
            (VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
            MEM_LOCK(pool, intSave);
            return NULL;
        }
        (VOID)OsMemFree(pool, node);
    }
    return tmpPtr;
}
1477 
/*
 * Public entry: resize the allocation at 'ptr' to 'size' bytes.
 * Follows the C realloc contract: NULL ptr behaves as malloc, size 0
 * behaves as free (returning NULL). The old pointer stays valid when
 * reallocation fails. Returns the new payload pointer, or NULL.
 */
VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
{
    /* Sizes with internal flag bits set cannot be encoded in a node. */
    if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
        return NULL;
    }

    OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);

    if (ptr == NULL) {
        return LOS_MemAlloc(pool, size);
    }

    if (size == 0) {
        (VOID)LOS_MemFree(pool, ptr);
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *node = NULL;
    VOID *newPtr = NULL;
    UINT32 intSave = 0;

    MEM_LOCK(poolHead, intSave);
    do {
        /* Undo any alignment gap, then validate the node before resizing. */
        ptr = OsGetRealPtr(pool, ptr);
        if (ptr == NULL) {
            break;
        }

        node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
        if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
            break;
        }

        newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    return newPtr;
}
1522 
1523 #if (LOSCFG_MEM_FREE_BY_TASKID == 1)
1524 STATIC VOID MemNodeFreeByTaskIDHandle(struct OsMemNodeHead *curNode, VOID *arg)
1525 {
1526     UINT32 *args = (UINT32 *)arg;
1527     UINT32 taskID = *args;
1528     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)(UINTPTR)(*(args + 1));
1529     struct OsMemUsedNodeHead *node = NULL;
1530     if (!OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1531         return;
1532     }
1533 
1534     node = (struct OsMemUsedNodeHead *)curNode;
1535     if (node->header.taskID == taskID) {
1536         OsMemFree(poolHead, &node->header);
1537     }
1538     return;
1539 }
1540 
1541 UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
1542 {
1543     UINT32 args[2] = { taskID, (UINT32)(UINTPTR)pool };
1544     if (pool == NULL) {
1545         return LOS_NOK;
1546     }
1547 
1548     if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1549         return LOS_NOK;
1550     }
1551 
1552     OsAllMemNodeDoHandle(pool, MemNodeFreeByTaskIDHandle, (VOID *)args);
1553 
1554     return LOS_OK;
1555 }
1556 #endif
1557 
/*
 * Public entry: total size of 'pool' in bytes. Subtracts inter-region gaps
 * when multi-region support is configured, and adds the size of every
 * expansion region chained off the sentinel when expansion is enabled.
 * Returns LOS_NOK for a NULL pool.
 */
UINT32 LOS_MemPoolSizeGet(const VOID *pool)
{
    UINT32 count = 0;

    if (pool == NULL) {
        return LOS_NOK;
    }

    count += ((struct OsMemPoolHead *)pool)->info.totalSize;
#if (LOSCFG_MEM_MUL_REGIONS == 1)
    count -= ((struct OsMemPoolHead *)pool)->info.totalGapSize;
#endif

#if OS_MEM_EXPAND_ENABLE
    /* Accumulate each expansion region reachable through the sentinels. */
    UINT32 size;
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);

    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        count += size;
    }
#endif
    return count;
}
1585 
1586 STATIC VOID MemUsedGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1587 {
1588     UINT32 *memUsed = (UINT32 *)arg;
1589     if (OS_MEM_IS_GAP_NODE(curNode)) {
1590         *memUsed += OS_MEM_NODE_HEAD_SIZE;
1591     } else if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1592         *memUsed += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
1593     }
1594     return;
1595 }
1596 
1597 UINT32 LOS_MemTotalUsedGet(VOID *pool)
1598 {
1599     UINT32 memUsed = 0;
1600 
1601     if (pool == NULL) {
1602         return LOS_NOK;
1603     }
1604 
1605     OsAllMemNodeDoHandle(pool, MemUsedGetHandle, (VOID *)&memUsed);
1606 
1607     return memUsed;
1608 }
1609 
/*
 * Report a corrupted magic word on a used node. The node's magic field only
 * exists when integrity checking is configured; otherwise this is a no-op
 * that just consumes the argument.
 */
STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
{
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    PRINT_ERR("[%s], %d, memory check error!\n"
              "memory used but magic num wrong, magic num = 0x%x\n",
              __FUNCTION__, __LINE__, (*tmpNode)->magic);
#else
    (VOID)tmpNode;
#endif
}
1620 
1621 STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
1622 {
1623     if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
1624         PRINT_ERR("[%s], %d, memory check error!\n"
1625                   " freeNode.prev: %p is out of legal mem range\n",
1626                   __FUNCTION__, __LINE__, (*tmpNode)->prev);
1627         return LOS_NOK;
1628     }
1629     if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
1630         PRINT_ERR("[%s], %d, memory check error!\n"
1631                   " freeNode.next: %p is out of legal mem range\n",
1632                   __FUNCTION__, __LINE__, (*tmpNode)->next);
1633         return LOS_NOK;
1634     }
1635     return LOS_OK;
1636 }
1637 
1638 STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool)
1639 {
1640     if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
1641         OsMemMagicCheckPrint(tmpNode);
1642         return LOS_NOK;
1643     }
1644 
1645     if (!OsMemAddrValidCheck(pool, (*tmpNode)->ptr.prev)) {
1646         PRINT_ERR("[%s], %d, memory check error!\n"
1647                   " node prev: %p is out of legal mem range\n",
1648                   __FUNCTION__, __LINE__, (*tmpNode)->ptr.next);
1649         return LOS_NOK;
1650     }
1651 
1652     if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* is free node, check free node range */
1653         if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
1654             return LOS_NOK;
1655         }
1656     }
1657 
1658     return LOS_OK;
1659 }
1660 
1661 STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
1662                 const struct OsMemFreeNodeHead *node)
1663 {
1664     if (!OsMemAddrValidCheck(pool, node) ||
1665         ((node->prev != NULL) && !OsMemAddrValidCheck(pool, node->prev)) ||
1666         ((node->next != NULL) && !OsMemAddrValidCheck(pool, node->next)) ||
1667         !OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
1668         return LOS_NOK;
1669     }
1670 
1671     if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
1672         !OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
1673         !OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
1674         !OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
1675         return LOS_NOK;
1676     }
1677 
1678     return LOS_OK;
1679 }
1680 
/*
 * Diagnostic pass over the pool header: verifies the header's self-pointer
 * and alignment, then walks every free-list bucket and prints any node that
 * fails OsMemFreeListNodeCheck. When a problem was found, dumps pool-level
 * statistics (and expansion-region info where enabled). Print-only; does
 * not repair anything.
 */
STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
{
    struct OsMemFreeNodeHead *tmpNode = NULL;
    UINT32 index;
    UINT32 flag = 0;

    if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
        PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
        return;
    }

    /* Scan every free-list bucket for out-of-range or misaligned nodes. */
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
            if (OsMemFreeListNodeCheck(pool, tmpNode)) {
                flag = 1;
                PRINT_ERR("FreeListIndex: %u, node: %p, bNode: %p, prev:%p, next: %p\n",
                          index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
            }
        }
    }

    if (flag) {
        PRINTK("mem pool info: poolAddr: %p, poolSize: 0x%x\n", pool, pool->info.totalSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
               pool->info.curUsedSize);
#endif
#if OS_MEM_EXPAND_ENABLE
        /* Also dump every expansion region reachable through the sentinels. */
        UINT32 size;
        struct OsMemNodeHead *node = NULL;
        struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
        while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
            size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
            node = OsMemSentinelNodeGet(sentinel);
            sentinel = OS_MEM_END_NODE(node, size);
            PRINTK("expand node info: nodeAddr: 0x%x, nodeSize: 0x%x\n", node, size);
        }
#endif
    }
}
1721 
/*
 * Full-pool integrity walk. After checking the pool header, every node in
 * the pool (and in each expansion region, where enabled) is validated with
 * OsMemIntegrityCheckSub; gap nodes are skipped. On failure, *tmpNode is
 * the broken node and *preNode the last node that passed — callers use the
 * pair for diagnostics. Returns LOS_OK when all nodes pass, LOS_NOK else.
 */
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
                struct OsMemNodeHead **preNode)
{
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    OsMemPoolHeadCheck(pool);

    *preNode = OS_MEM_FIRST_NODE(pool);
    do {
        for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
            if (OS_MEM_IS_GAP_NODE(*tmpNode)) {
                continue;
            }
            if (OsMemIntegrityCheckSub(tmpNode, pool) == LOS_NOK) {
                return LOS_NOK;
            }
            *preNode = *tmpNode;
        }
#if OS_MEM_EXPAND_ENABLE
        /* Hop into the next expansion region via the sentinel node. */
        if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
            *preNode = OsMemSentinelNodeGet(*tmpNode);
            endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
        } else
#endif
        {
            break;
        }
    } while (1);
    return LOS_OK;
}
1752 
#if (LOSCFG_KERNEL_PRINTF != 0)
/*
 * Print the raw header fields of the broken node and its predecessor.
 * Used nodes print {prev, [magic,] sizeAndFlag}; free nodes additionally
 * print their free-list links {prev, next, prev-free}.
 *
 * Fix: the free-variant of the "prev node head" report previously ended
 * with "0x%x, " (no newline), unlike the used-variant which ends with
 * "0x%x\n" — leaving the line unterminated. Both branches now end with
 * a newline.
 */
STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
                          const struct OsMemNodeHead *preNode)
{
    struct OsMemUsedNodeHead *usedNode = NULL;
    struct OsMemFreeNodeHead *freeNode = NULL;

    if (tmpNode == preNode) {
        PRINTK("\n the broken node is the first node\n");
    }

    if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x, ",
            usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            usedNode->header.magic,
#endif
            usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x, ",
            freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            freeNode->header.magic,
#endif
            freeNode->header.sizeAndFlag);
    }

    if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)preNode;
        PRINTK("prev node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x\n",
            usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            usedNode->header.magic,
#endif
            usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)preNode;
        PRINTK("prev node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            "0x%x  "
#endif
            "0x%x\n",
            freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
            freeNode->header.magic,
#endif
            freeNode->header.sizeAndFlag);
    }

#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemNodeBacktraceInfo(tmpNode, preNode);
#endif
}
#endif
1821 
/*
 * Snapshot of the two node headers involved in the most recent memory
 * integrity failure, filled in by OsMemCheckInfoRecord().
 */
struct OsMemIntegrityCheckInfo {
    struct OsMemNodeHead preNode;   /* copy of the node preceding the broken one */
    struct OsMemNodeHead errNode;   /* copy of the broken node itself */
};

/* Global failure record; written on integrity-check error before the panic. */
struct OsMemIntegrityCheckInfo g_integrityCheckRecord = {0};
1828 
1829 STATIC INLINE VOID OsMemCheckInfoRecord(const struct OsMemNodeHead *errNode,
1830                                      const struct OsMemNodeHead *preNode)
1831 {
1832     (VOID)memcpy(&g_integrityCheckRecord.preNode, preNode, sizeof(struct OsMemNodeHead));
1833     (VOID)memcpy(&g_integrityCheckRecord.errNode, errNode, sizeof(struct OsMemNodeHead));
1834 }
1835 
1836 STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
1837                                      const struct OsMemNodeHead *tmpNode,
1838                                      const struct OsMemNodeHead *preNode,
1839                                      UINT32 intSave)
1840 {
1841 #if (LOSCFG_KERNEL_PRINTF != 0)
1842     OsMemNodeInfo(tmpNode, preNode);
1843 #endif
1844     OsMemCheckInfoRecord(tmpNode, preNode);
1845 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
1846     LosTaskCB *taskCB = NULL;
1847     if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1848         struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
1849         UINT32 taskID = usedNode->header.taskID;
1850         if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1851             MEM_UNLOCK(pool, intSave);
1852             LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
1853         }
1854 
1855         taskCB = OS_TCB_FROM_TID(taskID);
1856         if ((taskCB->taskStatus & OS_TASK_STATUS_UNUSED) || (taskCB->taskEntry == NULL)) {
1857             MEM_UNLOCK(pool, intSave);
1858             LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
1859         }
1860     } else {
1861         PRINTK("The prev node is free\n");
1862     }
1863     MEM_UNLOCK(pool, intSave);
1864     PRINT_ERR("cur node: 0x%x, pre node: 0x%x, pre node was allocated by task: %d, %s\n",
1865               (unsigned int)tmpNode, (unsigned int)preNode, taskCB->taskID, taskCB->taskName);
1866     LOS_Panic("Memory integrity check error!\n");
1867 #else
1868     MEM_UNLOCK(pool, intSave);
1869     LOS_Panic("Memory integrity check error, cur node: 0x%x, pre node: 0x%x\n", tmpNode, preNode);
1870 #endif
1871 }
1872 
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
/*
 * Integrity-check hook run on the allocation path: walk the pool and, on the
 * first corrupt node, report it (which panics) — otherwise return LOS_OK.
 * The caller holds the pool lock; intSave is passed through for unlocking.
 */
STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
{
    struct OsMemNodeHead *brokenNode = NULL;
    struct OsMemNodeHead *prevNode = NULL;

    if (OsMemIntegrityCheck(pool, &brokenNode, &prevNode) == LOS_OK) {
        return LOS_OK;
    }

    OsMemIntegrityCheckError(pool, brokenNode, prevNode, intSave);
    return LOS_NOK;
}
#endif
1886 
1887 UINT32 LOS_MemIntegrityCheck(const VOID *pool)
1888 {
1889     if (pool == NULL) {
1890         return LOS_NOK;
1891     }
1892 
1893     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1894     struct OsMemNodeHead *tmpNode = NULL;
1895     struct OsMemNodeHead *preNode = NULL;
1896     UINT32 intSave = 0;
1897 
1898     MEM_LOCK(poolHead, intSave);
1899     if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
1900         goto ERROR_OUT;
1901     }
1902     MEM_UNLOCK(poolHead, intSave);
1903     return LOS_OK;
1904 
1905 ERROR_OUT:
1906     OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
1907     return LOS_NOK;
1908 }
1909 
1910 STATIC INLINE VOID OsMemInfoGet(struct OsMemNodeHead *node,
1911                 LOS_MEM_POOL_STATUS *poolStatus)
1912 {
1913     UINT32 totalUsedSize = 0;
1914     UINT32 totalFreeSize = 0;
1915     UINT32 usedNodeNum = 0;
1916     UINT32 freeNodeNum = 0;
1917     UINT32 maxFreeSize = 0;
1918     UINT32 size;
1919 
1920     if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1921         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1922         ++freeNodeNum;
1923         totalFreeSize += size;
1924         if (maxFreeSize < size) {
1925             maxFreeSize = size;
1926         }
1927     } else {
1928         if (OS_MEM_IS_GAP_NODE(node)) {
1929             size = OS_MEM_NODE_HEAD_SIZE;
1930         } else {
1931             size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1932         }
1933         ++usedNodeNum;
1934         totalUsedSize += size;
1935     }
1936 
1937     poolStatus->totalUsedSize += totalUsedSize;
1938     poolStatus->totalFreeSize += totalFreeSize;
1939     poolStatus->maxFreeNodeSize = poolStatus->maxFreeNodeSize > maxFreeSize ?
1940                                   poolStatus->maxFreeNodeSize : maxFreeSize;
1941     poolStatus->usedNodeNum += usedNodeNum;
1942     poolStatus->freeNodeNum += freeNodeNum;
1943 }
1944 
1945 STATIC VOID OsMemNodeInfoGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1946 {
1947     LOS_MEM_POOL_STATUS *poolStatus = (LOS_MEM_POOL_STATUS *)arg;
1948     OsMemInfoGet(curNode, poolStatus);
1949     return;
1950 }
1951 
1952 UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
1953 {
1954     struct OsMemPoolHead *poolInfo = pool;
1955     UINT32 intSave = 0;
1956 
1957     if (poolStatus == NULL) {
1958         PRINT_ERR("can't use NULL addr to save info\n");
1959         return LOS_NOK;
1960     }
1961 
1962     if ((pool == NULL) || (poolInfo->info.pool != pool)) {
1963         PRINT_ERR("wrong mem pool addr: 0x%x, line:%d\n", (UINTPTR)poolInfo, __LINE__);
1964         return LOS_NOK;
1965     }
1966 
1967     (VOID)memset_s(poolStatus, sizeof(LOS_MEM_POOL_STATUS), 0, sizeof(LOS_MEM_POOL_STATUS));
1968 
1969     OsAllMemNodeDoHandle(pool, OsMemNodeInfoGetHandle, (VOID *)poolStatus);
1970 
1971     MEM_LOCK(poolInfo, intSave);
1972 #if (LOSCFG_MEM_WATERLINE == 1)
1973     poolStatus->usageWaterLine = poolInfo->info.waterLine;
1974 #endif
1975     MEM_UNLOCK(poolInfo, intSave);
1976 
1977     return LOS_OK;
1978 }
1979 
/*
 * Print a one-line usage summary of the pool (address, size, used/free
 * totals, largest free node, node counts, and the waterline when enabled).
 * No-op unless kernel printing is compiled in; silently returns if the
 * statistics cannot be gathered.
 */
STATIC VOID OsMemInfoPrint(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    LOS_MEM_POOL_STATUS status = {0};

    if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
        return;
    }

#if (LOSCFG_MEM_WATERLINE == 1)
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num      UsageWaterLine\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------      ------------\n");
    PRINTK("%-16p   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x    0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum, status.usageWaterLine);
#else
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------\n");
    PRINTK("%-16p  0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum);
#endif
#endif
}
2011 
/*
 * Public API: print, per free-list bucket, how many free nodes the pool
 * currently holds and the node-size range each bucket covers.
 *
 * The counts are gathered under the pool lock, then printed after unlocking
 * so the lock is not held across console output.
 * Returns LOS_NOK on an invalid pool, LOS_OK otherwise.
 */
UINT32 LOS_MemFreeNodeShow(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;

    if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
        PRINT_ERR("wrong mem pool addr: 0x%x, line: %d\n", (UINTPTR)poolInfo, __LINE__);
        return LOS_NOK;
    }

    struct OsMemFreeNodeHead *node = NULL;
    UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
    UINT32 index;
    UINT32 intSave = 0;

    MEM_LOCK(poolInfo, intSave);
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        node = poolInfo->freeList[index];
        while (node) {
            node = node->next;
            countNum[index]++;
        }
    }
    MEM_UNLOCK(poolInfo, intSave);

    PRINTK("\n   ************************ left free node number**********************\n");
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        if (countNum[index] == 0) {
            continue;
        }

        PRINTK("free index: %03u, ", index);
        if (index < OS_MEM_SMALL_BUCKET_COUNT) {
            /* Small buckets hold one exact size each, in 4-byte steps (<< 2). */
            PRINTK("size: [0x%x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: setup is 4. */
        } else {
            /* Large (TLSF-style) buckets: derive the [min, max] size range of
             * this bucket from its power-of-two class and sub-index.
             * NOTE(review): range formula assumed to mirror the allocator's
             * bucket-mapping function defined earlier in this file — verify. */
            UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
            UINT32 offset = val >> OS_MEM_SLI;
            PRINTK("size: [0x%x, 0x%x], num: %u\n",
                   (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
                   ((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
                   countNum[index]);
        }
    }
    PRINTK("\n   ********************************************************************\n\n");
#endif
    return LOS_OK;
}
2059 
2060 VOID LOS_MemUnlockEnable(VOID *pool)
2061 {
2062     if (pool == NULL) {
2063         return;
2064     }
2065 
2066     ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_UNLOCK_ENABLE;
2067 }
2068 
2069 #if (LOSCFG_MEM_MUL_REGIONS == 1)
/*
 * Validate the memory-region array before linking it into a pool.
 *
 * Checks that: an existing pool (if given) records its own address; every
 * region has a non-NULL start and non-zero length; start and length are
 * OS_MEM_ALIGN_SIZE-aligned; and regions are in strictly ascending address
 * order with no overlap against the previous region (or the existing pool).
 * Note the ">=" below also rejects exactly-adjacent regions.
 *
 * Returns LOS_OK when all regions are acceptable, LOS_NOK otherwise.
 */
STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion * const memRegions,
                                                UINT32 memRegionCount)
{
    const LosMemRegion *memRegion = NULL;
    VOID *lastStartAddress = NULL;
    VOID *curStartAddress = NULL;
    UINT32 lastLength;   /* only read when lastStartAddress != NULL */
    UINT32 curLength;
    UINT32 regionCount;

    if ((pool != NULL) && (((struct OsMemPoolHead *)pool)->info.pool != pool)) {
        PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
        return LOS_NOK;
    }

    /* Seed the overlap check with the existing pool's extent, if any. */
    if (pool != NULL) {
        lastStartAddress = pool;
        lastLength = ((struct OsMemPoolHead *)pool)->info.totalSize;
    }

    memRegion = memRegions;
    regionCount = 0;
    while (regionCount < memRegionCount) {
        curStartAddress = memRegion->startAddress;
        curLength = memRegion->length;
        if ((curStartAddress == NULL) || (curLength == 0)) {
            PRINT_ERR("Memory address or length configured wrongly:address:0x%x, the length:0x%x\n",
                      (UINTPTR)curStartAddress, curLength);
            return LOS_NOK;
        }
        if (((UINTPTR)curStartAddress & (OS_MEM_ALIGN_SIZE - 1)) || (curLength & (OS_MEM_ALIGN_SIZE - 1))) {
            PRINT_ERR("Memory address or length configured not aligned:address:0x%x, the length:0x%x, alignsize:%d\n",
                      (UINTPTR)curStartAddress, curLength, OS_MEM_ALIGN_SIZE);
            return LOS_NOK;
        }
        if ((lastStartAddress != NULL) && (((UINT8 *)lastStartAddress + lastLength) >= (UINT8 *)curStartAddress)) {
            PRINT_ERR("Memory regions overlapped, the last start address:0x%x, "
                      "the length:0x%x, the current start address:0x%x\n",
                      (UINTPTR)lastStartAddress, lastLength, (UINTPTR)curStartAddress);
            return LOS_NOK;
        }
        memRegion++;
        regionCount++;
        lastStartAddress = curStartAddress;
        lastLength = curLength;
    }
    return LOS_OK;
}
2118 
/*
 * Append one memory region to the pool: convert the pool's current end node
 * into a used "gap" node spanning the hole between the pool end and the new
 * region, add the region's payload as a free node, and place a fresh end
 * node at the region's tail.
 *
 * NOTE(review): lastStartAddress and lastLength are not referenced in this
 * body — the gap is derived from poolHead->info.totalSize instead; confirm
 * whether the parameters are kept only for interface symmetry.
 */
STATIC INLINE VOID OsMemMulRegionsLink(struct OsMemPoolHead *poolHead, VOID *lastStartAddress, UINT32 lastLength,
                                       struct OsMemNodeHead *lastEndNode, const LosMemRegion *memRegion)
{
    UINT32 curLength;
    UINT32 gapSize;
    struct OsMemNodeHead *curEndNode = NULL;
    struct OsMemNodeHead *curFreeNode = NULL;
    VOID *curStartAddress = NULL;

    curStartAddress = memRegion->startAddress;
    curLength = memRegion->length;
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(curStartAddress, curLength);
        curLength = (resize == 0) ? curLength : resize;
    }
#endif
    // mark the gap between two regions as one used node
    gapSize = (UINT8 *)(curStartAddress) - ((UINT8 *)(poolHead) + poolHead->info.totalSize);
    lastEndNode->sizeAndFlag = gapSize + OS_MEM_NODE_HEAD_SIZE;
    OS_MEM_SET_MAGIC(lastEndNode);
    OS_MEM_NODE_SET_USED_FLAG(lastEndNode->sizeAndFlag);

    // mark the gap node with magic number
    OS_MEM_MARK_GAP_NODE(lastEndNode);

    /* Grow the pool to cover the gap plus the new region. */
    poolHead->info.totalSize += (curLength + gapSize);
    poolHead->info.totalGapSize += gapSize;

    /* The new region's payload becomes a single free node. */
    curFreeNode = (struct OsMemNodeHead *)curStartAddress;
    curFreeNode->sizeAndFlag = curLength - OS_MEM_NODE_HEAD_SIZE;
    curFreeNode->ptr.prev = lastEndNode;
    OS_MEM_SET_MAGIC(curFreeNode);
    OsMemFreeNodeAdd(poolHead, (struct OsMemFreeNodeHead *)curFreeNode);

    /* Terminate the region with a zero-size used end node. */
    curEndNode = OS_MEM_END_NODE(curStartAddress, curLength);
    curEndNode->sizeAndFlag = 0;
    curEndNode->ptr.prev = curFreeNode;
    OS_MEM_SET_MAGIC(curEndNode);
    OS_MEM_NODE_SET_USED_FLAG(curEndNode->sizeAndFlag);

#if (LOSCFG_MEM_WATERLINE == 1)
    /* Account only the new end node's header as used. */
    poolHead->info.curUsedSize += OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif
}
2170 
/*
 * Public API: build or extend a memory pool from an array of discontiguous
 * regions.
 *
 * If pool is non-NULL the regions are appended to that existing pool;
 * otherwise the first region is initialized as a new pool and the remaining
 * regions are appended to it. Regions must pass OsMemMulRegionsParamCheck
 * (aligned, ascending, non-overlapping).
 *
 * Returns LOS_OK on success, or the failing check/init error code.
 */
UINT32 LOS_MemRegionsAdd(VOID *pool, const LosMemRegion *const memRegions, UINT32 memRegionCount)
{
    UINT32 ret;
    UINT32 lastLength;
    UINT32 curLength;
    UINT32 regionCount;
    struct OsMemPoolHead *poolHead = NULL;
    struct OsMemNodeHead *lastEndNode = NULL;
    struct OsMemNodeHead *firstFreeNode = NULL;
    const LosMemRegion *memRegion = NULL;
    VOID *lastStartAddress = NULL;
    VOID *curStartAddress = NULL;

    ret = OsMemMulRegionsParamCheck(pool, memRegions, memRegionCount);
    if (ret != LOS_OK) {
        return ret;
    }

    memRegion = memRegions;
    regionCount = 0;
    if (pool != NULL) { // add the memory regions to the specified memory pool
        poolHead = (struct OsMemPoolHead *)pool;
        lastStartAddress = pool;
        lastLength = poolHead->info.totalSize;
    } else { // initialize the memory pool with the first memory region
        lastStartAddress = memRegion->startAddress;
        lastLength = memRegion->length;
        poolHead = (struct OsMemPoolHead *)lastStartAddress;
        ret = LOS_MemInit(lastStartAddress, lastLength);
        if (ret != LOS_OK) {
            return ret;
        }
        memRegion++;
        regionCount++;
    }

    firstFreeNode = OS_MEM_FIRST_NODE(lastStartAddress);
    lastEndNode = OS_MEM_END_NODE(lastStartAddress, poolHead->info.totalSize);
    /* traverse the rest memory regions, and initialize them as free nodes and link together */
    while (regionCount < memRegionCount) {
        curStartAddress = memRegion->startAddress;
        curLength = memRegion->length;

        OsMemMulRegionsLink(poolHead, lastStartAddress, lastLength, lastEndNode, memRegion);
        lastStartAddress = curStartAddress;
        lastLength = curLength;
        /* totalSize grew inside OsMemMulRegionsLink; recompute the end node. */
        lastEndNode = OS_MEM_END_NODE(poolHead, poolHead->info.totalSize);
        memRegion++;
        regionCount++;
    }

    /* Close the node ring: the first node's prev points at the final end node. */
    firstFreeNode->ptr.prev = lastEndNode;
    return ret;
}
2225 #endif
2226 
2227 UINT32 OsMemSystemInit(VOID)
2228 {
2229     UINT32 ret;
2230 
2231 #if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
2232     m_aucSysMem0 = g_memStart;
2233 #else
2234     m_aucSysMem0 = LOSCFG_SYS_HEAP_ADDR;
2235 #endif
2236 
2237     ret = LOS_MemInit(m_aucSysMem0, LOSCFG_SYS_HEAP_SIZE);
2238     PRINT_INFO("LiteOS heap memory address:%p, size:0x%lx\n", m_aucSysMem0, (unsigned long int)LOSCFG_SYS_HEAP_SIZE);
2239     return ret;
2240 }
2241 
2242 #if (LOSCFG_PLATFORM_EXC == 1)
/*
 * Fill one MemInfoCB with exception-dump information for a single pool:
 * start address, total/free sizes, node count, and — if a corrupt node is
 * found while scanning — the corrupt node's address, payload length and
 * (when per-task accounting is enabled) owning task ID.
 */
STATIC VOID OsMemExcInfoGetSub(struct OsMemPoolHead *pool, MemInfoCB *memExcInfo)
{
    struct OsMemNodeHead *tmpNode = NULL;
    UINT32 taskID = OS_TASK_ERRORID;
    UINT32 intSave = 0;

    (VOID)memset_s(memExcInfo, sizeof(MemInfoCB), 0, sizeof(MemInfoCB));

    MEM_LOCK(pool, intSave);
    memExcInfo->type = MEM_MANG_MEMORY;
    memExcInfo->startAddr = (UINTPTR)pool->info.pool;
    memExcInfo->size = pool->info.totalSize;
    memExcInfo->free = pool->info.totalSize - pool->info.curUsedSize;

    struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    for (tmpNode = firstNode; tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        memExcInfo->blockSize++;
        if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
            /* Used node: a bad magic or out-of-pool prev pointer means corruption. */
            if (!OS_MEM_MAGIC_VALID(tmpNode) ||
                !OsMemAddrValidCheck(pool, tmpNode->ptr.prev)) {
#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
                taskID = ((struct OsMemUsedNodeHead *)tmpNode)->header.taskID;
#endif
                goto ERROUT;
            }
        } else { /* is free node, check free node range */
            struct OsMemFreeNodeHead *freeNode = (struct OsMemFreeNodeHead *)tmpNode;
            if (OsMemAddrValidCheckPrint(pool, &freeNode)) {
                goto ERROUT;
            }
        }
    }
    MEM_UNLOCK(pool, intSave);
    return;

ERROUT:
    /* Record the corrupt node's payload address/length and owner, then unlock. */
    memExcInfo->errorAddr = (UINTPTR)((CHAR *)tmpNode + OS_MEM_NODE_HEAD_SIZE);
    memExcInfo->errorLen = OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag) - OS_MEM_NODE_HEAD_SIZE;
    memExcInfo->errorOwner = taskID;
    MEM_UNLOCK(pool, intSave);
    return;
}
2287 
2288 UINT32 OsMemExcInfoGet(UINT32 memNumMax, MemInfoCB *memExcInfo)
2289 {
2290     UINT8 *buffer = (UINT8 *)memExcInfo;
2291     UINT32 count = 0;
2292 
2293 #if (LOSCFG_MEM_MUL_POOL == 1)
2294     struct OsMemPoolHead *memPool = g_poolHead;
2295     while (memPool != NULL) {
2296         OsMemExcInfoGetSub(memPool, (MemInfoCB *)buffer);
2297         count++;
2298         buffer += sizeof(MemInfoCB);
2299         if (count >= memNumMax) {
2300             break;
2301         }
2302         memPool = memPool->nextPool;
2303     }
2304 #else
2305     OsMemExcInfoGetSub(m_aucSysMem0, buffer);
2306     count++;
2307 #endif
2308 
2309     return count;
2310 }
2311 #endif
2312