• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without modification,
6  * are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice, this list of
9  *    conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12  *    of conditions and the following disclaimer in the documentation and/or other materials
13  *    provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16  *    to endorse or promote products derived from this software without specific prior written
17  *    permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "los_memory.h"
33 #include "los_memory_pri.h"
34 #include "sys/param.h"
35 #include "los_spinlock.h"
36 #include "los_vm_phys.h"
37 #include "los_vm_boot.h"
38 #include "los_vm_filemap.h"
39 #include "los_task_pri.h"
40 #include "los_hook.h"
41 
42 #ifdef LOSCFG_KERNEL_LMS
43 #include "los_lms_pri.h"
44 #endif
45 
/* Used to cut non-essential functions. */
/* When 1, freed nodes can be matched against the task that allocated them. */
#define OS_MEM_FREE_BY_TASKID   0
/* Pool expansion relies on the VM physical-page allocator, so it is only
 * available when the kernel VM subsystem is compiled in. */
#ifdef LOSCFG_KERNEL_VM
#define OS_MEM_EXPAND_ENABLE    1
#else
#define OS_MEM_EXPAND_ENABLE    0
#endif

/* the dump size of current broken node when memcheck error */
#define OS_MEM_NODE_DUMP_SIZE   64
/* column num of the output info of mem node */
#define OS_MEM_COLUMN_NUM       8
58 
/* Base addresses of the two system memory regions; set up during memory init. */
UINT8 *m_aucSysMem0 = NULL;
UINT8 *m_aucSysMem1 = NULL;

#ifdef LOSCFG_MEM_MUL_POOL
/* Head of the singly linked list of registered memory pools (multi-pool support). */
VOID *g_poolHead = NULL;
#endif
65 
/* The following is the macro definition and interface implementation related to the TLSF. */

/* Supposing a Second Level Index: SLI = 3. */
#define OS_MEM_SLI                      3
/* Giving 1 free list for each small bucket: 4, 8, 12, up to 124. */
#define OS_MEM_SMALL_BUCKET_COUNT       31
#define OS_MEM_SMALL_BUCKET_MAX_SIZE    128
/* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
#define OS_MEM_LARGE_BUCKET_COUNT       24
#define OS_MEM_FREE_LIST_NUM            (1 << OS_MEM_SLI)
/* OS_MEM_SMALL_BUCKET_MAX_SIZE to the power of 2 is 7. */
#define OS_MEM_LARGE_START_BUCKET       7

/* The count of free list. */
#define OS_MEM_FREE_LIST_COUNT  (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
/* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty.
 * One 32-bit word covers 32 lists; "+ 1" rounds up for the remainder. */
#define OS_MEM_BITMAP_WORDS     ((OS_MEM_FREE_LIST_COUNT >> 5) + 1)

/* Mask for the bit position inside one 32-bit bitmap word (index % 32). */
#define OS_MEM_BITMAP_MASK 0x1FU
85 
86 /* Used to find the first bit of 1 in bitmap. */
OsMemFFS(UINT32 bitmap)87 STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
88 {
89     bitmap &= ~bitmap + 1;
90     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
91 }
92 
93 /* Used to find the last bit of 1 in bitmap. */
OsMemFLS(UINT32 bitmap)94 STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
95 {
96     return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
97 }
98 
/* floor(log2(size)); thin wrapper to give FLS a semantic name in the TLSF math. */
STATIC INLINE UINT32 OsMemLog2(UINT32 size)
{
    return OsMemFLS(size);
}
103 
104 /* Get the first level: f = log2(size). */
OsMemFlGet(UINT32 size)105 STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
106 {
107     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
108         return ((size >> 2) - 1); /* 2: The small bucket setup is 4. */
109     }
110     return OsMemLog2(size);
111 }
112 
/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f.
 * Rearranged to avoid the subtraction: (size << SLI) >> f includes the implicit
 * leading bit, so subtracting OS_MEM_FREE_LIST_NUM (2^SLI) removes it. */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
    return (((size << OS_MEM_SLI) >> fl) - OS_MEM_FREE_LIST_NUM);
}
118 
/* The following is the memory algorithm related macro definition and interface implementation. */

/* Common header shared by used and free nodes. */
struct OsMemNodeHead {
    UINT32 magic;               /* OS_MEM_NODE_MAGIC when the header is intact */
    union {
        struct OsMemNodeHead *prev; /* The prev is used for current node points to the previous node */
        struct OsMemNodeHead *next; /* The next is used for last node points to the expand node */
    } ptr;
#ifdef LOSCFG_MEM_LEAKCHECK
    UINTPTR linkReg[LOS_RECORD_LR_CNT]; /* caller link registers recorded for leak check */
#endif
    UINT32 sizeAndFlag;         /* node size with USED/ALIGNED/LAST flags in the top bits */
};

/* Header of an allocated node; user memory begins right after this struct. */
struct OsMemUsedNodeHead {
    struct OsMemNodeHead header;
#if OS_MEM_FREE_BY_TASKID
    UINT32 taskID;              /* task that allocated this node */
#endif
};

/* Header of a free node; prev/next link it into its size-class free list. */
struct OsMemFreeNodeHead {
    struct OsMemNodeHead header;
    struct OsMemFreeNodeHead *prev;
    struct OsMemFreeNodeHead *next;
};
145 
/* Basic pool bookkeeping: base address, total size and attribute flags. */
struct OsMemPoolInfo {
    VOID *pool;         /* pool base address */
    UINT32 totalSize;   /* pool size in bytes, including headers */
    UINT32 attr;        /* OS_MEM_POOL_EXPAND_ENABLE / OS_MEM_POOL_LOCK_ENABLE */
#ifdef LOSCFG_MEM_WATERLINE
    UINT32 waterLine;   /* Maximum usage size in a memory pool */
    UINT32 curUsedSize; /* Current usage size in a memory pool */
#endif
};

/* Pool control block placed at the very start of the pool memory. */
struct OsMemPoolHead {
    struct OsMemPoolInfo info;
    UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];          /* bit i set => freeList[i] non-empty */
    struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];
    SPIN_LOCK_S spinlock;                                /* protects the whole pool on SMP */
#ifdef LOSCFG_MEM_MUL_POOL
    VOID *nextPool;                                      /* next pool in the multi-pool list */
#endif
};
165 
/* Spinlock for mem module, only available on SMP mode */
#define MEM_LOCK(pool, state)       LOS_SpinLockSave(&(pool)->spinlock, &(state))
#define MEM_UNLOCK(pool, state)     LOS_SpinUnlockRestore(&(pool)->spinlock, (state))

/* The memory pool support expand. */
#define OS_MEM_POOL_EXPAND_ENABLE  0x01
/* The memory pool support no lock. */
#define OS_MEM_POOL_LOCK_ENABLE    0x02

#define OS_MEM_NODE_MAGIC        0xABCDDCBA
#define OS_MEM_MIN_ALLOC_SIZE    (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead))

/* Top three bits of sizeAndFlag carry node state; the rest is the size. */
#define OS_MEM_NODE_USED_FLAG      0x80000000U
#define OS_MEM_NODE_ALIGNED_FLAG   0x40000000U
#define OS_MEM_NODE_LAST_FLAG      0x20000000U  /* Sentinel Node */
/* NOTE: despite the name, this mask also includes the LAST (sentinel) flag. */
#define OS_MEM_NODE_ALIGNED_AND_USED_FLAG (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LAST_FLAG)

#define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
            ((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
#define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
            ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG))
#define OS_MEM_NODE_GET_ALIGNED_GAPSIZE(sizeAndFlag) \
            ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_FLAG)
#define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
            ((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
#define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
            ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG))
#define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
            ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
#define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
                        ((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG))
#define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
            ((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)

#define OS_MEM_ALIGN_SIZE           sizeof(UINTPTR)
#define OS_MEM_IS_POW_TWO(value)    ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
#define OS_MEM_ALIGN(p, alignSize)  (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
#define OS_MEM_IS_ALIGNED(a, b)     (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
#define OS_MEM_NODE_HEAD_SIZE       sizeof(struct OsMemUsedNodeHead)
#define OS_MEM_MIN_POOL_SIZE        (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
/* Nodes are laid out back to back, so the next node starts size bytes further on. */
#define OS_MEM_NEXT_NODE(node) \
    ((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
#define OS_MEM_FIRST_NODE(pool) \
    (struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
/* The sentinel node occupies the last OS_MEM_NODE_HEAD_SIZE bytes of the region. */
#define OS_MEM_END_NODE(pool, size) \
    (struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
#define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
    (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
#define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
    (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
#define OS_MEM_SET_MAGIC(node)      ((node)->magic = OS_MEM_NODE_MAGIC)
#define OS_MEM_MAGIC_VALID(node)    ((node)->magic == OS_MEM_NODE_MAGIC)
218 
/* Forward declarations for helpers defined later in this file. */
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
STATIC VOID OsMemInfoPrint(VOID *pool);
#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
#endif
225 
#if OS_MEM_FREE_BY_TASKID
/* Record the current task as the owner of a freshly allocated node. */
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
    node->taskID = LOS_CurTaskIDGet();
}
#endif
232 
#ifdef LOSCFG_MEM_WATERLINE
/* Add size to the pool's current usage and push the high-water mark if exceeded.
 * NOTE: callers pass negative deltas as large unsigned values; unsigned wrap-around
 * makes the subtraction come out right. Caller must hold the pool lock. */
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    pool->info.curUsedSize += size;
    if (pool->info.curUsedSize > pool->info.waterLine) {
        pool->info.waterLine = pool->info.curUsedSize;
    }
}
#else
/* Waterline tracking disabled: keep the call sites but do nothing. */
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    (VOID)pool;
    (VOID)size;
}
#endif
248 
#if OS_MEM_EXPAND_ENABLE
/*
 * Follow the chain of expansion regions from the given sentinel and return the
 * final sentinel (the one with next == NULL or size == 0).
 * Returns NULL only if the starting sentinel is already the last one.
 */
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
    struct OsMemNodeHead *node = NULL;
    VOID *ptr = sentinelNode->ptr.next;
    UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);

    while ((ptr != NULL) && (size != 0)) {
        /* Each expansion region ends with its own sentinel node. */
        node = OS_MEM_END_NODE(ptr, size);
        ptr = node->ptr.next;
        size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    }

    return node;
}
264 
OsMemSentinelNodeCheck(struct OsMemNodeHead * sentinelNode)265 STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
266 {
267     if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
268         return FALSE;
269     }
270 
271     if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
272         return FALSE;
273     }
274 
275     return TRUE;
276 }
277 
/*
 * Return TRUE when the sentinel terminates the expansion chain (no next region).
 * A corrupted sentinel is also reported as "last" (after logging) so that
 * traversals stop instead of following a bad pointer.
 */
STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
{
    if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
        PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
        return TRUE;
    }

    if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
        (sentinelNode->ptr.next == NULL)) {
        return TRUE;
    }

    return FALSE;
}
292 
/*
 * Point a sentinel at a new expansion region of the given size.
 * If the sentinel already links to a region, the assignment is redirected to
 * the last sentinel in the chain, appending rather than overwriting.
 */
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
    if (sentinelNode->ptr.next != NULL) {
        sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
    }

    sentinelNode->sizeAndFlag = size;
    sentinelNode->ptr.next = newNode;
    /* Sentinels are always flagged used + last so node walks treat them specially. */
    OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
    OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
}
304 
/* Return the expansion region a sentinel points to (NULL for the last sentinel). */
STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
{
    return node->ptr.next;
}
309 
/*
 * Find the sentinel whose next pointer references the expansion region that
 * starts at 'node'. Returns NULL (with an error log) if the chain ends first.
 */
STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
{
    UINT32 nextSize;
    struct OsMemNodeHead *nextNode = NULL;
    struct OsMemNodeHead *sentinelNode = NULL;

    /* Start from the main pool's own sentinel and walk the region chain. */
    sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
    while (sentinelNode != NULL) {
        if (OsMemIsLastSentinelNode(sentinelNode)) {
            /* NOTE(review): %#x with a pointer argument truncates on LP64 — confirm
             * against the kernel's PRINTK format conventions. */
            PRINT_ERR("PreSentinelNodeGet can not find node %#x\n", node);
            return NULL;
        }
        nextNode = OsMemSentinelNodeGet(sentinelNode);
        if (nextNode == node) {
            return sentinelNode;
        }
        nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
        sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
    }

    return NULL;
}
332 
/*
 * Return the contiguous physical pages backing an expansion region to the VM
 * allocator. Fails (LOS_NOK) if ptr does not resolve to a page, or the page is
 * not the head of a contiguous allocation (nPages == 0).
 */
UINT32 OsMemLargeNodeFree(const VOID *ptr)
{
    LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);
    if ((page == NULL) || (page->nPages == 0)) {
        return LOS_NOK;
    }
    LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages);

    return LOS_OK;
}
343 
/*
 * If the freed node covers an entire expansion region, unlink that region from
 * the sentinel chain and give its pages back to the system.
 * Returns TRUE when the region was released, FALSE otherwise.
 * Caller must hold the pool lock.
 */
STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *mySentinel = NULL;
    struct OsMemNodeHead *preSentinel = NULL;
    /* For a region-spanning free node, ptr.prev points at the region's own
     * sentinel, so this difference is the region's payload size. */
    size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
    size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);

    if (nodeSize != totalSize) {
        return FALSE; /* region is not fully free */
    }

    preSentinel = PreSentinelNodeGet(pool, node);
    if (preSentinel == NULL) {
        return FALSE;
    }

    mySentinel = node->ptr.prev;
    if (OsMemIsLastSentinelNode(mySentinel)) { /* prev node becomes sentinel node */
        preSentinel->ptr.next = NULL;
        OsMemSentinelNodeSet(preSentinel, NULL, 0);
    } else {
        /* Splice this region out of the middle of the chain. */
        preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
        preSentinel->ptr.next = mySentinel->ptr.next;
    }

    if (OsMemLargeNodeFree(node) != LOS_OK) {
        PRINT_ERR("TryShrinkPool free %#x failed!\n", node);
        return FALSE;
    }
#ifdef LOSCFG_KERNEL_LMS
    LOS_LmsCheckPoolDel(node);
#endif
    return TRUE;
}
378 
/*
 * Allocate a new page-aligned expansion region of at least 'size' bytes, link
 * it to the pool's sentinel chain, add it to the free lists and terminate it
 * with its own sentinel. Returns 0 on success, -1 on allocation failure.
 * Called with the pool lock held; the lock is dropped around page-cache
 * shrinking while retrying a failed page allocation.
 */
STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
{
    UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    /* Room for the trailing sentinel, rounded up to whole pages. */
    size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);

RETRY:
    newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
    if (newNode == NULL) {
        if (tryCount > 0) {
            tryCount--;
            /* Drop the pool lock while reclaiming page cache, then retry. */
            MEM_UNLOCK(poolInfo, intSave);
            OsTryShrinkMemory(size >> PAGE_SHIFT);
            MEM_LOCK(poolInfo, intSave);
            goto RETRY;
        }

        PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
        return -1;
    }
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(newNode, size);
        size = (resize == 0) ? size : resize;
    }
#endif
    newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
    /* prev of the region-spanning node points at the region's own sentinel;
     * TryShrinkPool relies on this to detect a fully-free region. */
    newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
    OsMemSentinelNodeSet(endNode, newNode, size);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* Build the new region's terminating sentinel. */
    endNode = OS_MEM_END_NODE(newNode, size);
    (VOID)memset(endNode, 0, sizeof(*endNode));
    endNode->ptr.next = NULL;
    endNode->magic = OS_MEM_NODE_MAGIC;
    OsMemSentinelNodeSet(endNode, NULL, 0);
    /* The sentinel header itself counts as used memory. */
    OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);

    return 0;
}
428 
/*
 * Expand the pool by the larger of the configured default growth and the
 * requested allocation size. If that fails and the request is smaller than
 * the default, retry once with just the requested size.
 * Returns 0 on success, -1 if no expansion could be performed.
 */
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave)
{
    UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool));
    UINT32 expandSize = MAX(expandDefault, allocSize);
    UINT32 tryCount = 1; /* one fallback attempt with the smaller size */
    UINT32 ret;

    do {
        ret = OsMemPoolExpandSub(pool, expandSize, intSave);
        if (ret == 0) {
            return 0;
        }

        if (allocSize > expandDefault) {
            /* Already tried the exact (larger) request; nothing smaller helps. */
            break;
        }
        expandSize = allocSize;
    } while (tryCount--);

    return -1;
}
450 
/* Enable automatic expansion for the given pool; ignores a NULL pool. */
VOID LOS_MemExpandEnable(VOID *pool)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;

    if (poolHead == NULL) {
        return;
    }

    poolHead->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
}
459 #endif
460 
#ifdef LOSCFG_KERNEL_LMS
/*
 * Paint LMS shadow memory for a freshly initialized pool: the pool header is
 * painted, the first node's header and the end node's header become red zones,
 * and the free payload in between is marked after-free.
 */
STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
    g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_AFTERFREE_U8);
}
475 
/*
 * Shadow-mark an aligned allocation: the gap between the raw pointer and the
 * aligned pointer becomes a red zone (with the stored gap-size word painted),
 * and the tail beyond the requested size is also a red zone.
 */
STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
{
    struct OsMemNodeHead *allocNode = NULL;

    if ((g_lms == NULL) || (ptr == NULL)) {
        return;
    }
    /* Step back over the used-node header to reach the node head. */
    allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
    if (ptr != alignedPtr) {
        g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
        g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
    }

    /* mark remaining as redzone */
    g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
        LMS_SHADOW_REDZONE_U8);
}
493 
/* After merging a node during realloc, mark its whole payload accessible. */
STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
        LMS_SHADOW_ACCESSIBLE_U8);
}
503 
/* After splitting a node during realloc, mark the new (free) successor:
 * its header becomes a red zone and its payload is marked after-free. */
STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark next node */
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
        (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
        (UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
}
515 
/* After shrinking a node in place, mark the bytes past the new size as red zone. */
STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
{
    if (g_lms == NULL) {
        return;
    }
    /* mark remaining as redzone */
    g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
}
524 #endif
525 
#ifdef LOSCFG_MEM_LEAKCHECK
/* Capture the caller's link registers into the node header for leak tracing. */
STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
{
    LOS_RecordLR(node->linkReg, LOS_RECORD_LR_CNT, LOS_RECORD_LR_CNT, LOS_OMIT_LR_CNT);
}
531 
/* Print one used node's address and its recorded link registers; free nodes
 * are skipped. Field widths differ between 64-bit and 32-bit builds. */
STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
{
    UINT32 count;

    if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
#ifdef __LP64__
        PRINTK("0x%018x: ", node);
#else
        PRINTK("0x%010x: ", node);
#endif
        for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
#ifdef __LP64__
            PRINTK(" 0x%018x ", node->linkReg[count]);
#else
            PRINTK(" 0x%010x ", node->linkReg[count]);
#endif
        }
        PRINTK("\n");
    }
}
552 
/*
 * Dump every used node in the pool (address plus recorded link registers).
 * Verifies pool integrity first, then walks the main region and — when pool
 * expansion is enabled — follows the sentinel chain through each expansion
 * region. Runs under the pool lock.
 */
VOID OsMemUsedNodeShow(VOID *pool)
{
    if (pool == NULL) {
        PRINTK("input param is NULL\n");
        return;
    }
    if (LOS_MemIntegrityCheck(pool)) {
        PRINTK("LOS_MemIntegrityCheck error\n");
        return;
    }
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 size;
    UINT32 intSave;
    UINT32 count;

    /* Header row; column widths match OsMemUsedNodePrint's output. */
#ifdef __LP64__
    PRINTK("\n\rnode                ");
#else
    PRINTK("\n\rnode        ");
#endif
    for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
#ifdef __LP64__
        PRINTK("        LR[%u]       ", count);
#else
        PRINTK("    LR[%u]   ", count);
#endif
    }
    PRINTK("\n");

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;
         tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        if (tmpNode == endNode) {
            /* Reached a sentinel: hop into the next expansion region, if any. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            } else {
                break;
            }
        } else {
            OsMemUsedNodePrint(tmpNode);
        }
    }
#else
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
         tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        OsMemUsedNodePrint(tmpNode);
    }
#endif
    MEM_UNLOCK(poolInfo, intSave);
}
610 
/* Print the recorded link registers of a corrupted node and its predecessor,
 * as part of the integrity-check failure diagnostics. */
STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
                                   const struct OsMemNodeHead *preNode)
{
    int i;
    PRINTK("\n broken node head LR info: \n");
    for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
        PRINTK(" LR[%d]:%#x\n", i, tmpNode->linkReg[i]);
    }

    PRINTK("\n pre node head LR info: \n");
    for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
        PRINTK(" LR[%d]:%#x\n", i, preNode->linkReg[i]);
    }
}
625 #endif
626 
OsMemFreeListIndexGet(UINT32 size)627 STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
628 {
629     UINT32 fl = OsMemFlGet(size);
630     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
631         return fl;
632     }
633 
634     UINT32 sl = OsMemSlGet(size, fl);
635     return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl);
636 }
637 
OsMemFindCurSuitableBlock(struct OsMemPoolHead * poolHead,UINT32 index,UINT32 size)638 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
639                                                                   UINT32 index, UINT32 size)
640 {
641     struct OsMemFreeNodeHead *node = NULL;
642 
643     for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
644         if (node->header.sizeAndFlag >= size) {
645             return node;
646         }
647     }
648 
649     return NULL;
650 }
651 
652 #define BITMAP_INDEX(index) ((index) >> 5)
OsMemNotEmptyIndexGet(struct OsMemPoolHead * poolHead,UINT32 index)653 STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
654 {
655     UINT32 mask;
656 
657     mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
658     mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
659     if (mask != 0) {
660         index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
661         return index;
662     }
663 
664     return OS_MEM_FREE_LIST_COUNT;
665 }
666 
/*
 * Find a free list that can satisfy 'size' (TLSF good-fit search).
 * First tries lists strictly larger than the exact bucket (those are
 * guaranteed to fit), scanning the bitmap word by word; if none exists,
 * falls back to scanning the exact bucket's own list for a big-enough node.
 * On success writes the chosen list index to *outIndex and returns its head
 * (or the matching node for the exact-bucket fallback); returns NULL if no
 * list can satisfy the request.
 */
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 fl = OsMemFlGet(size);
    UINT32 sl;
    UINT32 index, tmp;
    UINT32 curIndex = OS_MEM_FREE_LIST_COUNT; /* exact bucket, if computed */
    UINT32 mask;

    do {
        if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
            /* Small buckets are exact-fit: start at the bucket itself. */
            index = fl;
        } else {
            sl = OsMemSlGet(size, fl);
            curIndex = ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
            /* Start one past the exact bucket: those lists always fit. */
            index = curIndex + 1;
        }

        tmp = OsMemNotEmptyIndexGet(poolHead, index);
        if (tmp != OS_MEM_FREE_LIST_COUNT) {
            index = tmp;
            goto DONE;
        }

        /* Continue the search in the remaining whole bitmap words. */
        for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) { /* 32: align size */
            mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
            if (mask != 0) {
                index = OsMemFFS(mask) + index;
                goto DONE;
            }
        }
    } while (0);

    if (curIndex == OS_MEM_FREE_LIST_COUNT) {
        return NULL;
    }

    /* Fall back to the exact bucket: must scan since fit is not guaranteed. */
    *outIndex = curIndex;
    return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
DONE:
    *outIndex = index;
    return poolHead->freeList[index];
}
710 
OsMemSetFreeListBit(struct OsMemPoolHead * head,UINT32 index)711 STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
712 {
713     head->freeListBitmap[BITMAP_INDEX(index)] |= 1U << (index & 0x1f);
714 }
715 
OsMemClearFreeListBit(struct OsMemPoolHead * head,UINT32 index)716 STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
717 {
718     head->freeListBitmap[BITMAP_INDEX(index)] &= ~(1U << (index & 0x1f));
719 }
720 
/* Push a free node onto the head of freeList[listIndex] and set its bitmap bit.
 * Caller must hold the pool lock. */
STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
    struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
    if (firstNode != NULL) {
        firstNode->prev = node;
    }
    node->prev = NULL;
    node->next = firstNode;
    pool->freeList[listIndex] = node;
    OsMemSetFreeListBit(pool, listIndex);
    /* Refresh the magic so integrity checks accept the node. */
    node->header.magic = OS_MEM_NODE_MAGIC;
}
733 
/* Unlink a node from freeList[listIndex]; clears the bitmap bit when the list
 * becomes empty. Caller must hold the pool lock. */
STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
    if (node == pool->freeList[listIndex]) {
        /* Removing the head: advance the list and update the bitmap. */
        pool->freeList[listIndex] = node->next;
        if (node->next == NULL) {
            OsMemClearFreeListBit(pool, listIndex);
        } else {
            node->next->prev = NULL;
        }
    } else {
        node->prev->next = node->next;
        if (node->next != NULL) {
            node->next->prev = node->prev;
        }
    }
    /* Refresh the magic so integrity checks accept the node. */
    node->header.magic = OS_MEM_NODE_MAGIC;
}
751 
/* Insert a free node into the free list matching its size; panics on an
 * out-of-range index (indicates header corruption). */
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
{
    UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
    if (index >= OS_MEM_FREE_LIST_COUNT) {
        LOS_Panic("The index of free lists is error, index = %u\n", index);
        return;
    }
    OsMemListAdd(pool, index, node);
}

/* Remove a free node from the free list matching its size; panics on an
 * out-of-range index (indicates header corruption). */
STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
{
    UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
    if (index >= OS_MEM_FREE_LIST_COUNT) {
        LOS_Panic("The index of free lists is error, index = %u\n", index);
        return;
    }
    OsMemListDelete(pool, index, node);
}
771 
/* Find a free node big enough for 'size', unlink it from its free list and
 * return its header; NULL if no suitable node exists. Caller holds the lock. */
STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 index;
    struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
    if (firstNode == NULL) {
        return NULL;
    }

    OsMemListDelete(poolHead, index, firstNode);

    return &firstNode->header;
}
785 
/* Coalesce 'node' into its predecessor: the predecessor absorbs node's size,
 * and the successor's back-pointer is rewired (unless it is a sentinel).
 * Caller must have already unlinked 'node' from its free list. */
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *nextNode = NULL;

    node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
    nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
        nextNode->ptr.prev = node->ptr.prev;
    }
}
796 
/*
 * Split 'allocNode' at 'allocSize': the remainder becomes a new free node.
 * If the node after the remainder is also free, it is first merged into the
 * remainder so adjacent free space stays coalesced. The remainder is then
 * added to the appropriate free list. Caller holds the pool lock.
 */
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
    struct OsMemFreeNodeHead *newFreeNode = NULL;
    struct OsMemNodeHead *nextNode = NULL;

    newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
    newFreeNode->header.ptr.prev = allocNode;
    newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
    allocNode->sizeAndFlag = allocSize;
    nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
        nextNode->ptr.prev = &newFreeNode->header;
        if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
            /* Neighbor is free: pull it off its list and merge it in. */
            OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
            OsMemMergeNode(nextNode);
        }
    }

    OsMemFreeNodeAdd(pool, newFreeNode);
}
817 
/*
 * Convert a node just taken from the free lists into a used allocation
 * and return the caller-visible payload address (just past the header).
 */
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
    struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;

#if OS_MEM_FREE_BY_TASKID
    /* Stamp the owner so LOS_MemFreeByTaskID can find this block later. */
    OsMemNodeSetTaskID(node);
#endif

#ifdef LOSCFG_KERNEL_LMS
    struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
    if (g_lms != NULL) {
        /* Mark the payload accessible for the sanitizer; headers stay poisoned. */
        g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    /* Payload begins immediately after the used-node header. */
    return node + 1;
}
834 
/*
 * Lay out a fresh pool inside [pool, pool + size): zero the control head,
 * create one free node spanning all usable bytes, and terminate the pool
 * with an end/sentinel node. Always returns LOS_OK.
 */
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    (VOID)memset_s(poolHead, sizeof(struct OsMemPoolHead), 0, sizeof(struct OsMemPoolHead));

#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(pool, size);
        size = (resize == 0) ? size : resize;
    }
#endif

    LOS_SpinInit(&poolHead->spinlock);
    poolHead->info.pool = pool;
    poolHead->info.totalSize = size;
    poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. */

    /* One free node initially covers everything between head and end node. */
    newNode = OS_MEM_FIRST_NODE(pool);
    newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = NULL;
    newNode->magic = OS_MEM_NODE_MAGIC;
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* The last mem node */
    endNode = OS_MEM_END_NODE(pool, size);
    endNode->magic = OS_MEM_NODE_MAGIC;
#if OS_MEM_EXPAND_ENABLE
    /* Expandable pools end in a sentinel that can chain to new regions. */
    endNode->ptr.next = NULL;
    OsMemSentinelNodeSet(endNode, NULL, 0);
#else
    endNode->sizeAndFlag = 0;
    endNode->ptr.prev = newNode;
    OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
#ifdef LOSCFG_MEM_WATERLINE
    /* The pool head and the end node themselves count as used bytes. */
    poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
    if (resize != 0) {
        OsLmsFirstNodeMark(pool, newNode);
    }
#endif
    return LOS_OK;
}
888 
889 #ifdef LOSCFG_MEM_MUL_POOL
/*
 * Reverse OsMemPoolInit: drop the pool's LMS shadow memory (if any) and
 * zero the control header so the region no longer looks like a pool.
 * NOTE(review): memset_s writes through the const-qualified 'pool'
 * pointer, effectively casting away const — confirm this is intended.
 */
STATIC VOID OsMemPoolDeInit(const VOID *pool, UINT32 size)
{
#ifdef LOSCFG_KERNEL_LMS
    if (g_lms != NULL) {
        g_lms->deInit(pool);
    }
#endif
    (VOID)memset_s(pool, size, 0, sizeof(struct OsMemPoolHead));
}
899 
/*
 * Append 'pool' (of 'size' bytes) to the tail of the global pool list.
 * Returns LOS_NOK if the new pool's address range overlaps any pool
 * already registered.
 * NOTE(review): pointers are printed with %#x, which truncates addresses
 * on LP64 targets — verify against the project's PRINT_ERR conventions.
 */
STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
{
    VOID *nextPool = g_poolHead;
    VOID *curPool = g_poolHead;
    UINTPTR poolEnd;
    while (nextPool != NULL) {
        poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
        /* Conflict if the new range starts before an existing pool but reaches
         * into it, or starts inside an existing pool's range. */
        if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
            (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
            PRINT_ERR("pool [%#x, %#x) conflict with pool [%#x, %#x)\n",
                      pool, (UINTPTR)pool + size,
                      nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
            return LOS_NOK;
        }
        curPool = nextPool;
        nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
    }

    if (g_poolHead == NULL) {
        g_poolHead = pool;
    } else {
        /* curPool is the current list tail; append behind it. */
        ((struct OsMemPoolHead *)curPool)->nextPool = pool;
    }

    ((struct OsMemPoolHead *)pool)->nextPool = NULL;
    return LOS_OK;
}
927 
OsMemPoolDelete(const VOID * pool)928 STATIC UINT32 OsMemPoolDelete(const VOID *pool)
929 {
930     UINT32 ret = LOS_NOK;
931     VOID *nextPool = NULL;
932     VOID *curPool = NULL;
933 
934     do {
935         if (pool == g_poolHead) {
936             g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
937             ret = LOS_OK;
938             break;
939         }
940 
941         curPool = g_poolHead;
942         nextPool = g_poolHead;
943         while (nextPool != NULL) {
944             if (pool == nextPool) {
945                 ((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
946                 ret = LOS_OK;
947                 break;
948             }
949             curPool = nextPool;
950             nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
951         }
952     } while (0);
953 
954     return ret;
955 }
956 #endif
957 
/*
 * Initialize 'pool' of 'size' bytes as a dynamic memory pool and, when
 * multi-pool support is enabled, register it in the global pool list.
 * Returns LOS_OK on success, OS_ERROR on bad parameters or failure.
 */
UINT32 LOS_MemInit(VOID *pool, UINT32 size)
{
    UINT32 alignedSize;

    if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
        return OS_ERROR;
    }

    /* Work with an alignment-rounded size throughout. */
    alignedSize = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);
    if (OsMemPoolInit(pool, alignedSize)) {
        return OS_ERROR;
    }

#ifdef LOSCFG_MEM_MUL_POOL
    /* Registration can fail on overlap; undo the pool layout then. */
    if (OsMemPoolAdd(pool, alignedSize)) {
        (VOID)OsMemPoolDeInit(pool, alignedSize);
        return OS_ERROR;
    }
#endif

    OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, alignedSize);
    return LOS_OK;
}
979 
980 #ifdef LOSCFG_MEM_MUL_POOL
LOS_MemDeInit(VOID * pool)981 UINT32 LOS_MemDeInit(VOID *pool)
982 {
983     struct OsMemPoolHead *tmpPool = (struct OsMemPoolHead *)pool;
984 
985     if ((tmpPool == NULL) ||
986         (tmpPool->info.pool != pool) ||
987         (tmpPool->info.totalSize <= OS_MEM_MIN_POOL_SIZE)) {
988         return OS_ERROR;
989     }
990 
991     if (OsMemPoolDelete(tmpPool)) {
992         return OS_ERROR;
993     }
994 
995     OsMemPoolDeInit(tmpPool, tmpPool->info.totalSize);
996 
997     OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, tmpPool);
998     return LOS_OK;
999 }
1000 
LOS_MemPoolList(VOID)1001 UINT32 LOS_MemPoolList(VOID)
1002 {
1003     VOID *nextPool = g_poolHead;
1004     UINT32 index = 0;
1005     while (nextPool != NULL) {
1006         PRINTK("pool%u :\n", index);
1007         index++;
1008         OsMemInfoPrint(nextPool);
1009         nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1010     }
1011     return index;
1012 }
1013 #endif
1014 
/*
 * Core allocator, called with the pool lock held: take a suitably sized
 * node off the free lists, split off any excess, mark it used, and return
 * the payload address. 'intSave' is needed because the lock is dropped
 * temporarily for pool expansion and for the out-of-memory report.
 * Returns NULL when no block can satisfy the request.
 */
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *allocNode = NULL;

#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
    if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
        return NULL;
    }
#endif

    /* Account for the node header and round up to the pool alignment. */
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE
retry:
#endif
    allocNode = OsMemFreeNodeGet(pool, allocSize);
    if (allocNode == NULL) {
#if OS_MEM_EXPAND_ENABLE
        /* Expandable pools may grab more physical memory and retry. */
        if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
            INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
            if (ret == 0) {
                goto retry;
            }
        }
#endif
        /* Drop the pool lock while printing the failure report — presumably
         * to keep the critical section short; TODO confirm the print path
         * is safe to run without the pool lock here. */
        MEM_UNLOCK(pool, intSave);
        PRINT_ERR("---------------------------------------------------"
                  "--------------------------------------------------------\n");
        OsMemInfoPrint(pool);
        PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
        PRINT_ERR("----------------------------------------------------"
                  "-------------------------------------------------------\n");
        MEM_LOCK(pool, intSave);
        return NULL;
    }

    /* Split only when the leftover can hold a header plus a minimal block. */
    if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {
        OsMemSplitNode(pool, allocNode, allocSize);
    }

    OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
    OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));

#ifdef LOSCFG_MEM_LEAKCHECK
    OsMemLinkRegisterRecord(allocNode);
#endif
    return OsMemCreateUsedNode((VOID *)allocNode);
}
1062 
/*
 * Allocate 'size' bytes from 'pool'. Before the OS heap exists
 * (pool == NULL) a non-zero request is served by the boot allocator.
 * Returns NULL on failure or when the request is invalid.
 */
VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *head = (struct OsMemPoolHead *)pool;
    VOID *ptr = NULL;
    UINT32 intSave;

    if ((pool == NULL) || (size == 0)) {
        return (size > 0) ? OsVmBootMemAlloc(size) : NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    /* Sizes that collide with the internal flag bits are rejected. */
    if (!OS_MEM_NODE_GET_USED_FLAG(size) && !OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
        MEM_LOCK(head, intSave);
        ptr = OsMemAlloc(head, size, intSave);
        MEM_UNLOCK(head, intSave);
    }

    OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
    return ptr;
}
1089 
/*
 * Allocate 'size' bytes whose payload address is aligned to 'boundary'
 * (a power of two, at least pointer-aligned). Over-allocates by up to
 * 'boundary' bytes, then records the offset ("gap") between the raw and
 * the aligned pointer in the word just below the returned address so
 * LOS_MemFree can recover the real node header later.
 */
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
    UINT32 gapSize;

    if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
        !OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    /*
     * sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
     * the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
     * offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
     */
    if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
        return NULL;
    }

    UINT32 useSize = (size + boundary) - sizeof(gapSize);
    if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
        return NULL;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 intSave;
    VOID *ptr = NULL;
    VOID *alignedPtr = NULL;

    do {
        MEM_LOCK(poolHead, intSave);
        ptr = OsMemAlloc(pool, useSize, intSave);
        MEM_UNLOCK(poolHead, intSave);
        /* NULL aligns to NULL, so allocation failure also takes this break. */
        alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
        if (ptr == alignedPtr) {
#ifdef LOSCFG_KERNEL_LMS
            OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
            break;
        }

        /* store gapSize in address (ptr - 4), it will be checked while free */
        gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
        struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
        /* Flag both the node and the stored gap so free can tell a gap word
         * apart from ordinary header data. */
        OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
        OS_MEM_NODE_SET_ALIGNED_FLAG(gapSize);
        *(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
#ifdef LOSCFG_KERNEL_LMS
        OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
        ptr = alignedPtr;
    } while (0);

    OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);
    return ptr;
}
1149 
/*
 * Check whether 'addr' is a legal in-pool address. NULL is accepted
 * because the pool's first node carries a NULL prev pointer. When
 * expansion is enabled, every chained region is searched as well.
 */
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
    UINT32 size;

    /* First node prev is NULL */
    if (addr == NULL) {
        return TRUE;
    }

    size = pool->info.totalSize;
    /* Inside the base pool body (past the control head, before the end)? */
    if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
        return TRUE;
    }
#if OS_MEM_EXPAND_ENABLE
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
    /* Walk the sentinel chain of expansion regions and test each range. */
    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
            return TRUE;
        }
    }
#endif
    return FALSE;
}
1177 
OsMemIsNodeValid(const struct OsMemNodeHead * node,const struct OsMemNodeHead * startNode,const struct OsMemNodeHead * endNode,const struct OsMemPoolHead * poolInfo)1178 STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
1179                                     const struct OsMemNodeHead *endNode,
1180                                     const struct OsMemPoolHead *poolInfo)
1181 {
1182     if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
1183         return FALSE;
1184     }
1185 
1186     if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1187         if (!OS_MEM_MAGIC_VALID(node)) {
1188             return FALSE;
1189         }
1190         return TRUE;
1191     }
1192 
1193     if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
1194         return FALSE;
1195     }
1196 
1197     return TRUE;
1198 }
1199 
/*
 * Consistency check for a node being freed within one pool region: the
 * node must be valid and marked used, its physical successor must be
 * valid and point back at it, and (unless it is the region's first node)
 * its predecessor must be valid and physically adjacent.
 */
STATIC  BOOL MemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node,
                              const struct OsMemNodeHead *startNode, const struct OsMemNodeHead *endNode)
{
    if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
        return FALSE;
    }

    /* A node without the used flag implies a double free or corruption. */
    if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
        return FALSE;
    }

    const struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node);
    if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
        return FALSE;
    }

    /* The successor's back-link must agree with the physical layout. */
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
        if (nextNode->ptr.prev != node) {
            return FALSE;
        }
    }

    /* Same agreement for the predecessor, except on the region's first node. */
    if ((node != startNode) &&
        ((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
        (OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
        return FALSE;
    }

    return TRUE;
}
1230 
/*
 * Run MemCheckUsedNode against each region of the pool in turn: first
 * the base pool, then (when expansion is enabled) every region chained
 * through sentinel nodes. Returns LOS_OK once any region validates the
 * node, LOS_NOK when none does.
 */
STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
    BOOL doneFlag = FALSE;

    do {
        doneFlag = MemCheckUsedNode(pool, node, startNode, endNode);
        if (!doneFlag) {
#if OS_MEM_EXPAND_ENABLE
            /* Not in this region — advance to the next chained region, if any. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                startNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
                continue;
            }
#endif
            return LOS_NOK;
        }
    } while (!doneFlag);

    return LOS_OK;
}
1253 
/*
 * Release a used node back to 'pool' (pool lock held by the caller):
 * validate it, strip its flags, coalesce with free physical neighbours,
 * possibly return a fully-free expansion region to the page allocator,
 * and finally put the resulting node on a free list.
 */
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
{
    UINT32 ret = OsMemCheckUsedNode(pool, node);
    if (ret != LOS_OK) {
        PRINT_ERR("OsMemFree check error!\n");
        return ret;
    }

#ifdef LOSCFG_MEM_WATERLINE
    pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#endif

    /* Strip the used/aligned flag bits, leaving the raw node size. */
    node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#ifdef LOSCFG_MEM_LEAKCHECK
    OsMemLinkRegisterRecord(node);
#endif
#ifdef LOSCFG_KERNEL_LMS
    /* Capture the pre-merge boundaries; merging below changes them. */
    struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
    struct OsMemNodeHead *curNodeBackup = node;
    if (g_lms != NULL) {
        g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
    }
#endif
    struct OsMemNodeHead *preNode = node->ptr.prev; /* merge with the previous physical node */
    if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
        OsMemMergeNode(node);
        node = preNode;
    }

    struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge with the next physical node */
    if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
        OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
        OsMemMergeNode(nextNode);
    }

#if OS_MEM_EXPAND_ENABLE
    if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
        /* if this is a expand head node, and all unused, free it to pmm */
        if ((node->ptr.prev != NULL) && (node->ptr.prev > node)) {
            if (TryShrinkPool(pool, node)) {
                return LOS_OK;
            }
        }
    }
#endif
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
#ifdef LOSCFG_KERNEL_LMS
    if (g_lms != NULL) {
        g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    return ret;
}
1308 
/*
 * Public free entry: reject unaligned arguments, translate an aligned
 * allocation (gap word flagged below 'ptr') back to its real node, and
 * free the node under the pool lock. Returns LOS_NOK on any rejection.
 */
UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
{
    UINT32 intSave;
    UINT32 ret = LOS_NOK;

    if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
        !OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
        return ret;
    }
    OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *node = NULL;

    do {
        /* For plain allocations this word is node-header data; the flag
         * combination below distinguishes it from a stored align gap. */
        UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));
        if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
            PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
            break;
        }

        node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);

        if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
            gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
            /* The gap must be alignment-granular and small enough that the
             * recovered header address does not underflow. */
            if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) || (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE))) {
                PRINT_ERR("illegal gapSize: 0x%x\n", gapSize);
                break;
            }
            node = (struct OsMemNodeHead *)((UINTPTR)ptr - gapSize - OS_MEM_NODE_HEAD_SIZE);
        }
        MEM_LOCK(poolHead, intSave);
        ret = OsMemFree(poolHead, node);
        MEM_UNLOCK(poolHead, intSave);
    } while (0);

    return ret;
}
1347 
/*
 * Shrink a used node in place for realloc: restore the raw size, split
 * off the excess when it is large enough to form a free block, then
 * re-mark the node used.
 */
STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
{
#ifdef LOSCFG_MEM_WATERLINE
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
#endif
    node->sizeAndFlag = nodeSize;
    if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= nodeSize) {
        OsMemSplitNode(pool, node, allocSize);
        /* NOTE(review): this early used-flag set looks redundant — the same
         * flag is OR'ed in again unconditionally below. */
        OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#ifdef LOSCFG_MEM_WATERLINE
        poolInfo->info.curUsedSize -= nodeSize - allocSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
        /* The else arm below pairs with this if only when LMS is compiled in. */
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#ifdef LOSCFG_MEM_LEAKCHECK
    OsMemLinkRegisterRecord(node);
#endif
}
1371 
OsMemMergeNodeForReAllocBigger(VOID * pool,UINT32 allocSize,struct OsMemNodeHead * node,UINT32 nodeSize,struct OsMemNodeHead * nextNode)1372 STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
1373                                                   UINT32 nodeSize, struct OsMemNodeHead *nextNode)
1374 {
1375     node->sizeAndFlag = nodeSize;
1376     OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
1377     OsMemMergeNode(nextNode);
1378 #ifdef LOSCFG_KERNEL_LMS
1379     OsLmsReallocMergeNodeMark(node);
1380 #endif
1381     if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= node->sizeAndFlag) {
1382         OsMemSplitNode(pool, node, allocSize);
1383 #ifdef LOSCFG_KERNEL_LMS
1384         OsLmsReallocSplitNodeMark(node);
1385     } else {
1386         OsLmsReallocResizeMark(node, allocSize);
1387 #endif
1388     }
1389     OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
1390     OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, node->sizeAndFlag - nodeSize);
1391 #ifdef LOSCFG_MEM_LEAKCHECK
1392     OsMemLinkRegisterRecord(node);
1393 #endif
1394 }
1395 
/*
 * Translate a possibly-aligned user pointer back to the start of its
 * allocation payload. If the word below 'ptr' carries the aligned flag,
 * the gap recorded by LOS_MemAllocAlign is subtracted after validation.
 * Returns NULL when the gap data looks corrupted.
 */
STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
{
    VOID *realPtr = ptr;
    UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));

    /* Aligned flag plus used flag together are impossible for a stored gap. */
    if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
        PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
        return NULL;
    }
    if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
        gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
        /* The gap must be alignment-granular and keep the node inside the pool. */
        if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
            (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
            PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
            return NULL;
        }
        realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
    }
    return realPtr;
}
1416 
/*
 * Realloc worker, called with the pool lock held. Strategy, in order:
 * shrink in place, grow by absorbing a free successor, else allocate a
 * new block, copy the old payload and free the old node. On copy failure
 * the original block is left untouched and NULL is returned.
 */
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
                                 struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *nextNode = NULL;
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
    UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    VOID *tmpPtr = NULL;

    /* Current node already big enough: shrink in place. */
    if (nodeSize >= allocSize) {
        OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
        return (VOID *)ptr;
    }

    /* A free successor may provide enough room to grow without moving. */
    nextNode = OS_MEM_NEXT_NODE(node);
    if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
        ((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
        OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
        return (VOID *)ptr;
    }

    tmpPtr = OsMemAlloc(pool, size, intSave);
    if (tmpPtr != NULL) {
        if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
            /* LOS_MemFree takes the pool lock itself, so drop it around the call. */
            MEM_UNLOCK(pool, intSave);
            (VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
            MEM_LOCK(pool, intSave);
            return NULL;
        }
        (VOID)OsMemFree(pool, node);
    }
    return tmpPtr;
}
1449 
/*
 * Resize the allocation at 'ptr' to 'size' bytes, shrinking or growing in
 * place when possible and falling back to alloc+copy+free otherwise.
 * Follows C realloc semantics: ptr == NULL behaves like LOS_MemAlloc,
 * size == 0 frees ptr and returns NULL.
 */
VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
{
    if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
        return NULL;
    }
    OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);

    if (ptr == NULL) {
        /* LOS_MemAlloc applies its own minimum-size clamp (and rejects 0). */
        return LOS_MemAlloc(pool, size);
    }

    /*
     * FIX: handle the free-on-zero case BEFORE clamping the request to
     * OS_MEM_MIN_ALLOC_SIZE. Clamping first made this branch unreachable,
     * turning realloc(ptr, 0) into a minimum-size reallocation instead of
     * the expected free.
     */
    if (size == 0) {
        (VOID)LOS_MemFree(pool, ptr);
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *node = NULL;
    VOID *newPtr = NULL;
    UINT32 intSave;

    MEM_LOCK(poolHead, intSave);
    do {
        /* Undo any LOS_MemAllocAlign gap to reach the real payload start. */
        ptr = OsGetRealPtr(pool, ptr);
        if (ptr == NULL) {
            break;
        }

        node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
        if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
            break;
        }

        newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    return newPtr;
}
1492 
1493 #if OS_MEM_FREE_BY_TASKID
/*
 * Free every used block in 'pool' whose header was stamped with 'taskID'
 * at allocation time. Walks all nodes, following expansion-region
 * sentinels inline. Returns LOS_OK, or OS_ERROR on bad arguments.
 */
UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
{
    if (pool == NULL) {
        return OS_ERROR;
    }

    if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
        return OS_ERROR;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemUsedNodeHead *node = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 size;
    UINT32 intSave;

    MEM_LOCK(poolHead, intSave);
    endNode = OS_MEM_END_NODE(pool, poolHead->info.totalSize);
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
        if (tmpNode == endNode) {
            /* Region boundary: hop to the next chained region, or stop. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            } else {
                break;
            }
        } else {
            if (!OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
                tmpNode = OS_MEM_NEXT_NODE(tmpNode);
                continue;
            }

            /* Advance before freeing: OsMemFree may merge the node away. */
            node = (struct OsMemUsedNodeHead *)tmpNode;
            tmpNode = OS_MEM_NEXT_NODE(tmpNode);

            if (node->taskID == taskID) {
                OsMemFree(poolHead, &node->header);
            }
        }
    }
    MEM_UNLOCK(poolHead, intSave);

    return LOS_OK;
}
1541 #endif
1542 
LOS_MemPoolSizeGet(const VOID * pool)1543 UINT32 LOS_MemPoolSizeGet(const VOID *pool)
1544 {
1545     UINT32 count = 0;
1546 
1547     if (pool == NULL) {
1548         return LOS_NOK;
1549     }
1550 
1551     count += ((struct OsMemPoolHead *)pool)->info.totalSize;
1552 
1553 #if OS_MEM_EXPAND_ENABLE
1554     UINT32 size;
1555     struct OsMemNodeHead *node = NULL;
1556     struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);
1557 
1558     while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
1559         size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
1560         node = OsMemSentinelNodeGet(sentinel);
1561         sentinel = OS_MEM_END_NODE(node, size);
1562         count += size;
1563     }
1564 #endif
1565     return count;
1566 }
1567 
/*
 * Sum the sizes of all used nodes in 'pool'. With expansion enabled the
 * walk crosses region sentinels, counting each sentinel header as used.
 * Returns the used byte count, or LOS_NOK for a NULL pool.
 */
UINT32 LOS_MemTotalUsedGet(VOID *pool)
{
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 memUsed = 0;
    UINT32 intSave;

    if (pool == NULL) {
        return LOS_NOK;
    }

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
    UINT32 size;
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
        if (tmpNode == endNode) {
            /* Count the sentinel header, then hop to the next region if any. */
            memUsed += OS_MEM_NODE_HEAD_SIZE;
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            } else {
                break;
            }
        } else {
            if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
                memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
            }
            tmpNode = OS_MEM_NEXT_NODE(tmpNode);
        }
    }
#else
    /* No expansion: a single linear pass over the base pool's nodes. */
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;) {
        if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
            memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
        }
        tmpNode = OS_MEM_NEXT_NODE(tmpNode);
    }
#endif
    MEM_UNLOCK(poolInfo, intSave);

    return memUsed;
}
1614 
OsMemMagicCheckPrint(struct OsMemNodeHead ** tmpNode)1615 STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
1616 {
1617     PRINT_ERR("[%s], %d, memory check error!\n"
1618               "memory used but magic num wrong, magic num = %#x\n",
1619               __FUNCTION__, __LINE__, (*tmpNode)->magic);
1620 }
1621 
OsMemAddrValidCheckPrint(const VOID * pool,struct OsMemFreeNodeHead ** tmpNode)1622 STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
1623 {
1624     if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
1625         PRINT_ERR("[%s], %d, memory check error!\n"
1626                   " freeNode.prev:%#x is out of legal mem range\n",
1627                   __FUNCTION__, __LINE__, (*tmpNode)->prev);
1628         return LOS_NOK;
1629     }
1630     if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
1631         PRINT_ERR("[%s], %d, memory check error!\n"
1632                   " freeNode.next:%#x is out of legal mem range\n",
1633                   __FUNCTION__, __LINE__, (*tmpNode)->next);
1634         return LOS_NOK;
1635     }
1636     return LOS_OK;
1637 }
1638 
OsMemIntegrityCheckSub(struct OsMemNodeHead ** tmpNode,const VOID * pool,const struct OsMemNodeHead * endNode)1639 STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool,
1640                                      const struct OsMemNodeHead *endNode)
1641 {
1642     if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
1643         OsMemMagicCheckPrint(tmpNode);
1644         return LOS_NOK;
1645     }
1646 
1647     if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* is free node, check free node range */
1648         if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
1649             return LOS_NOK;
1650         }
1651     }
1652     return LOS_OK;
1653 }
1654 
OsMemFreeListNodeCheck(const struct OsMemPoolHead * pool,const struct OsMemFreeNodeHead * node)1655 STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
1656                                      const struct OsMemFreeNodeHead *node)
1657 {
1658     if (!OsMemAddrValidCheck(pool, node) ||
1659         !OsMemAddrValidCheck(pool, node->prev) ||
1660         !OsMemAddrValidCheck(pool, node->next) ||
1661         !OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
1662         return LOS_NOK;
1663     }
1664 
1665     if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
1666         !OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
1667         !OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
1668         !OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
1669         return LOS_NOK;
1670     }
1671 
1672     return LOS_OK;
1673 }
1674 
/*
 * Validate the pool head and walk every free list looking for corrupt
 * free nodes. When a broken node is found, print its links, then dump
 * pool-level information (and, when expansion is enabled, each expanded
 * region). Diagnostic only: prints errors, never modifies the pool.
 */
STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
{
    struct OsMemFreeNodeHead *tmpNode = NULL;
    UINT32 index;
    UINT32 flag = 0; /* set to 1 when a corrupt free node is found */

    /* the pool head must point back to itself and be pointer-aligned */
    if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
        PRINT_ERR("wrong mem pool addr: %#x, func:%s, line:%d\n", pool, __FUNCTION__, __LINE__);
        return;
    }

    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
            if (OsMemFreeListNodeCheck(pool, tmpNode)) {
                flag = 1;
                PRINT_ERR("FreeListIndex: %u, node: %#x, bNode: %#x, prev: %#x, next: %#x\n",
                          index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
                goto OUT;
            }
        }
    }

OUT:
    if (flag) {
        PRINTK("mem pool info: poolAddr: %#x, poolSize: 0x%x\n", pool, pool->info.totalSize);
#ifdef LOSCFG_MEM_WATERLINE
        PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
               pool->info.curUsedSize);
#endif
#if OS_MEM_EXPAND_ENABLE
        /* walk the chain of expanded regions via their sentinel nodes */
        UINT32 size;
        struct OsMemNodeHead *node = NULL;
        struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
        while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
            size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
            node = OsMemSentinelNodeGet(sentinel);
            sentinel = OS_MEM_END_NODE(node, size);
            PRINTK("expand node info: nodeAddr: %#x, nodeSize: 0x%x\n", node, size);
        }
#endif
    }
}
1717 
/*
 * Walk every node of the pool (and, when expansion is enabled, every
 * expanded region) running the per-node integrity check.
 * On LOS_NOK return, *tmpNode is the broken node and *preNode its
 * predecessor. Caller is expected to hold the pool lock.
 */
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
                                  struct OsMemNodeHead **preNode)
{
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    OsMemPoolHeadCheck(pool);

    *preNode = OS_MEM_FIRST_NODE(pool);
    do {
        for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
            if (OsMemIntegrityCheckSub(tmpNode, pool, endNode) == LOS_NOK) {
                return LOS_NOK;
            }
            *preNode = *tmpNode;
        }
#if OS_MEM_EXPAND_ENABLE
        /* *tmpNode now points at the sentinel; hop to the next expanded region */
        if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
            *preNode = OsMemSentinelNodeGet(*tmpNode);
            endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
        } else
#endif
        {
            break;
        }
    } while (1);
    return LOS_OK;
}
1745 
/*
 * Dump diagnostic information about a broken node and its predecessor:
 * their header fields (layout depends on the used/free state of each
 * node), optional allocation backtraces, and a raw byte dump of the
 * memory around the broken node.
 */
STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
                          const struct OsMemNodeHead *preNode)
{
    struct OsMemUsedNodeHead *usedNode = NULL;
    struct OsMemFreeNodeHead *freeNode = NULL;

    if (tmpNode == preNode) {
        PRINTK("\n the broken node is the first node\n");
    }

    /* print the broken node's header according to its used/free state */
    if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)tmpNode;
        PRINTK("\n broken node head: %#x  %#x  %#x, ",
               usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)tmpNode;
        PRINTK("\n broken node head: %#x  %#x  %#x  %#x, %#x",
               freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
               freeNode->header.sizeAndFlag);
    }

    /* print the previous node's header the same way */
    if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)preNode;
        PRINTK("prev node head: %#x  %#x  %#x\n",
               usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)preNode;
        PRINTK("prev node head: %#x  %#x  %#x  %#x, %#x",
               freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
               freeNode->header.sizeAndFlag);
    }

#ifdef LOSCFG_MEM_LEAKCHECK
    OsMemNodeBacktraceInfo(tmpNode, preNode);
#endif

    /* hex dump just after tmpNode, and just before it when distinct from preNode */
    PRINTK("\n---------------------------------------------\n");
    PRINTK(" dump mem tmpNode:%#x ~ %#x\n", tmpNode, ((UINTPTR)tmpNode + OS_MEM_NODE_DUMP_SIZE));
    OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, (UINTPTR)tmpNode);
    PRINTK("\n---------------------------------------------\n");
    if (preNode != tmpNode) {
        PRINTK(" dump mem :%#x ~ tmpNode:%#x\n", ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE), tmpNode);
        OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE));
        PRINTK("\n---------------------------------------------\n");
    }
}
1792 
OsMemIntegrityCheckError(struct OsMemPoolHead * pool,const struct OsMemNodeHead * tmpNode,const struct OsMemNodeHead * preNode,UINT32 intSave)1793 STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
1794                                      const struct OsMemNodeHead *tmpNode,
1795                                      const struct OsMemNodeHead *preNode,
1796                                      UINT32 intSave)
1797 {
1798     OsMemNodeInfo(tmpNode, preNode);
1799 
1800 #if OS_MEM_FREE_BY_TASKID
1801     LosTaskCB *taskCB = NULL;
1802     if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1803         struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
1804         UINT32 taskID = usedNode->taskID;
1805         if (OS_TID_CHECK_INVALID(taskID)) {
1806             MEM_UNLOCK(pool, intSave);
1807             LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
1808             return;
1809         }
1810 
1811         taskCB = OS_TCB_FROM_TID(taskID);
1812         if (OsTaskIsUnused(taskCB) || (taskCB->taskEntry == NULL)) {
1813             MEM_UNLOCK(pool, intSave);
1814             LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
1815             return;
1816         }
1817     } else {
1818         PRINTK("The prev node is free\n");
1819     }
1820     MEM_UNLOCK(pool, intSave);
1821     LOS_Panic("cur node: %#x\npre node: %#x\npre node was allocated by task:%s\n",
1822               tmpNode, preNode, taskCB->taskName);
1823 #else
1824     MEM_UNLOCK(pool, intSave);
1825     LOS_Panic("Memory integrity check error, cur node: %#x, pre node: %#x\n", tmpNode, preNode);
1826 #endif
1827 }
1828 
#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
/*
 * Run a full pool integrity check before an allocation. On failure, dump
 * the broken nodes and panic (the error path releases the pool lock via
 * intSave). Returns LOS_OK when the pool is intact.
 */
STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
{
    struct OsMemNodeHead *brokenNode = NULL;
    struct OsMemNodeHead *prevNode = NULL;

    if (OsMemIntegrityCheck(pool, &brokenNode, &prevNode)) {
        OsMemIntegrityCheckError(pool, brokenNode, prevNode, intSave);
        return LOS_NOK;
    }
    return LOS_OK;
}
#endif
1842 
LOS_MemIntegrityCheck(const VOID * pool)1843 UINT32 LOS_MemIntegrityCheck(const VOID *pool)
1844 {
1845     if (pool == NULL) {
1846         return LOS_NOK;
1847     }
1848 
1849     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1850     struct OsMemNodeHead *tmpNode = NULL;
1851     struct OsMemNodeHead *preNode = NULL;
1852     UINT32 intSave = 0;
1853 
1854     MEM_LOCK(poolHead, intSave);
1855     if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
1856         goto ERROR_OUT;
1857     }
1858     MEM_UNLOCK(poolHead, intSave);
1859     return LOS_OK;
1860 
1861 ERROR_OUT:
1862     OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
1863     return LOS_NOK;
1864 }
1865 
OsMemInfoGet(struct OsMemPoolHead * poolInfo,struct OsMemNodeHead * node,LOS_MEM_POOL_STATUS * poolStatus)1866 STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node,
1867                                 LOS_MEM_POOL_STATUS *poolStatus)
1868 {
1869     UINT32 totalUsedSize = 0;
1870     UINT32 totalFreeSize = 0;
1871     UINT32 usedNodeNum = 0;
1872     UINT32 freeNodeNum = 0;
1873     UINT32 maxFreeSize = 0;
1874     UINT32 size;
1875 
1876     if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1877         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1878         ++freeNodeNum;
1879         totalFreeSize += size;
1880         if (maxFreeSize < size) {
1881             maxFreeSize = size;
1882         }
1883     } else {
1884         size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1885         ++usedNodeNum;
1886         totalUsedSize += size;
1887     }
1888 
1889     poolStatus->totalUsedSize += totalUsedSize;
1890     poolStatus->totalFreeSize += totalFreeSize;
1891     poolStatus->maxFreeNodeSize = MAX(poolStatus->maxFreeNodeSize, maxFreeSize);
1892     poolStatus->usedNodeNum += usedNodeNum;
1893     poolStatus->freeNodeNum += freeNodeNum;
1894 }
1895 
/*
 * Public API: collect usage statistics for a pool into poolStatus
 * (used/free sizes and node counts, max free node size, and the usage
 * waterline when enabled). Walks every node under the pool lock; when
 * expansion is enabled, sentinel nodes are counted as used and followed
 * into each expanded region.
 * Returns LOS_OK on success, LOS_NOK on invalid arguments.
 */
UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
{
    struct OsMemPoolHead *poolInfo = pool;

    if (poolStatus == NULL) {
        PRINT_ERR("can't use NULL addr to save info\n");
        return LOS_NOK;
    }

    /* the pool head must point back to itself, else the address is bogus */
    if ((pool == NULL) || (poolInfo->info.pool != pool)) {
        PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
        return LOS_NOK;
    }

    (VOID)memset_s(poolStatus, sizeof(LOS_MEM_POOL_STATUS), 0, sizeof(LOS_MEM_POOL_STATUS));

    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 intSave;

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
    UINT32 size;
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        if (tmpNode == endNode) {
            /* sentinel node: count its header as used, then follow it
             * into the next expanded region (if any) */
            poolStatus->totalUsedSize += OS_MEM_NODE_HEAD_SIZE;
            poolStatus->usedNodeNum++;
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            } else {
                break;
            }
        } else {
            OsMemInfoGet(poolInfo, tmpNode, poolStatus);
        }
    }
#else
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        OsMemInfoGet(poolInfo, tmpNode, poolStatus);
    }
#endif
#ifdef LOSCFG_MEM_WATERLINE
    poolStatus->usageWaterLine = poolInfo->info.waterLine;
#endif
    MEM_UNLOCK(poolInfo, intSave);

    return LOS_OK;
}
1948 
OsMemInfoPrint(VOID * pool)1949 STATIC VOID OsMemInfoPrint(VOID *pool)
1950 {
1951     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1952     LOS_MEM_POOL_STATUS status = {0};
1953 
1954     if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
1955         return;
1956     }
1957 
1958 #ifdef LOSCFG_MEM_WATERLINE
1959     PRINTK("pool addr          pool size    used size     free size    "
1960            "max free node size   used node num     free node num      UsageWaterLine\n");
1961     PRINTK("---------------    --------     -------       --------     "
1962            "--------------       -------------      ------------      ------------\n");
1963     PRINTK("%-16#x   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x    0x%-13x\n",
1964            poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
1965            status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
1966            status.freeNodeNum, status.usageWaterLine);
1967 #else
1968     PRINTK("pool addr          pool size    used size     free size    "
1969            "max free node size   used node num     free node num\n");
1970     PRINTK("---------------    --------     -------       --------     "
1971            "--------------       -------------      ------------\n");
1972     PRINTK("%-16#x   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x    0x%-13x\n",
1973            poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
1974            status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
1975            status.freeNodeNum);
1976 #endif
1977 }
1978 
LOS_MemFreeNodeShow(VOID * pool)1979 UINT32 LOS_MemFreeNodeShow(VOID *pool)
1980 {
1981     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
1982 
1983     if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
1984         PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
1985         return LOS_NOK;
1986     }
1987 
1988     struct OsMemFreeNodeHead *node = NULL;
1989     UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
1990     UINT32 index;
1991     UINT32 intSave;
1992 
1993     MEM_LOCK(poolInfo, intSave);
1994     for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
1995         node = poolInfo->freeList[index];
1996         while (node) {
1997             node = node->next;
1998             countNum[index]++;
1999         }
2000     }
2001     MEM_UNLOCK(poolInfo, intSave);
2002 
2003     PRINTK("\n   ************************ left free node number**********************\n");
2004     for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
2005         if (countNum[index] == 0) {
2006             continue;
2007         }
2008 
2009         PRINTK("free index: %03u, ", index);
2010         if (index < OS_MEM_SMALL_BUCKET_COUNT) {
2011             PRINTK("size: [%#x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: setup is 4. */
2012         } else {
2013             UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
2014             UINT32 offset = val >> OS_MEM_SLI;
2015             PRINTK("size: [%#x, %#x], num: %u\n",
2016                    (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
2017                    ((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
2018                    countNum[index]);
2019         }
2020     }
2021     PRINTK("\n   ********************************************************************\n\n");
2022 
2023     return LOS_OK;
2024 }
2025 
OsKHeapInit(size_t size)2026 STATUS_T OsKHeapInit(size_t size)
2027 {
2028     STATUS_T ret;
2029     VOID *ptr = NULL;
2030     /*
2031      * roundup to MB aligned in order to set kernel attributes. kernel text/code/data attributes
2032      * should page mapping, remaining region should section mapping. so the boundary should be
2033      * MB aligned.
2034      */
2035     UINTPTR end = ROUNDUP(g_vmBootMemBase + size, MB);
2036     size = end - g_vmBootMemBase;
2037 
2038     ptr = OsVmBootMemAlloc(size);
2039     if (!ptr) {
2040         PRINT_ERR("vmm_kheap_init boot_alloc_mem failed! %d\n", size);
2041         return -1;
2042     }
2043 
2044     m_aucSysMem0 = m_aucSysMem1 = ptr;
2045     ret = LOS_MemInit(m_aucSysMem0, size);
2046     if (ret != LOS_OK) {
2047         PRINT_ERR("vmm_kheap_init LOS_MemInit failed!\n");
2048         g_vmBootMemBase -= size;
2049         return ret;
2050     }
2051 #if OS_MEM_EXPAND_ENABLE
2052     LOS_MemExpandEnable(OS_SYS_MEM_ADDR);
2053 #endif
2054     return LOS_OK;
2055 }
2056 
OsMemIsHeapNode(const VOID * ptr)2057 BOOL OsMemIsHeapNode(const VOID *ptr)
2058 {
2059     struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;
2060     struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
2061     struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
2062 
2063     if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
2064         return TRUE;
2065     }
2066 
2067 #if OS_MEM_EXPAND_ENABLE
2068     UINT32 intSave;
2069     UINT32 size;
2070     MEM_LOCK(pool, intSave);
2071     while (OsMemIsLastSentinelNode(endNode) == FALSE) {
2072         size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
2073         firstNode = OsMemSentinelNodeGet(endNode);
2074         endNode = OS_MEM_END_NODE(firstNode, size);
2075         if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
2076             MEM_UNLOCK(pool, intSave);
2077             return TRUE;
2078         }
2079     }
2080     MEM_UNLOCK(pool, intSave);
2081 #endif
2082     return FALSE;
2083 }
2084