1 /*
2 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "los_memory.h"
33 #include "securec.h"
34 #include "los_arch.h"
35 #include "los_config.h"
36 #include "los_debug.h"
37 #include "los_hook.h"
38 #include "los_interrupt.h"
39 #include "los_task.h"
40 #ifdef LOSCFG_KERNEL_LMS
41 #include "los_lms_pri.h"
42 #endif
43 #if (LOSCFG_KERNEL_LMK == 1)
44 #include "los_lmk.h"
45 #endif
46
47 /* Used to cut non-essential functions. */
48 #define OS_MEM_EXPAND_ENABLE 0
49
50 UINT8 *m_aucSysMem0 = NULL;
51
52 #if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
53 STATIC UINT8 g_memStart[LOSCFG_SYS_HEAP_SIZE];
54 #endif
55
56 #if (LOSCFG_MEM_MUL_POOL == 1)
57 VOID *g_poolHead = NULL;
58 #endif
59
60 /* The following is the macro definition and interface implementation related to the TLSF. */
61
62 /* Supposing a Second Level Index: SLI = 3. */
63 #define OS_MEM_SLI 3
64 /* Giving 1 free list for each small bucket: 4, 8, 12, up to 124. */
65 #define OS_MEM_SMALL_BUCKET_COUNT 31
66 #define OS_MEM_SMALL_BUCKET_MAX_SIZE 128
67 /* Giving 2^OS_MEM_SLI free lists for each large bucket. */
68 #define OS_MEM_LARGE_BUCKET_COUNT 24
/* log2(OS_MEM_SMALL_BUCKET_MAX_SIZE) is 7, i.e. 2^7 = 128. */
70 #define OS_MEM_LARGE_START_BUCKET 7
71
72 /* The count of free list. */
73 #define OS_MEM_FREE_LIST_COUNT (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
74 /* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty. */
75 #define OS_MEM_BITMAP_WORDS ((OS_MEM_FREE_LIST_COUNT >> 5) + 1)
76
77 #define OS_MEM_BITMAP_MASK 0x1FU
78
79 /* Used to find the first bit of 1 in bitmap. */
OsMemFFS(UINT32 bitmap)80 STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
81 {
82 bitmap &= ~bitmap + 1;
83 return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
84 }
85
86 /* Used to find the last bit of 1 in bitmap. */
OsMemFLS(UINT32 bitmap)87 STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
88 {
89 return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
90 }
91
OsMemLog2(UINT32 size)92 STATIC INLINE UINT32 OsMemLog2(UINT32 size)
93 {
94 return (size > 0) ? OsMemFLS(size) : 0;
95 }
96
97 /* Get the first level: f = log2(size). */
OsMemFlGet(UINT32 size)98 STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
99 {
100 if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
101 return ((size >> 2) - 1); /* 2: The small bucket setup is 4. */
102 }
103 return (OsMemLog2(size) - OS_MEM_LARGE_START_BUCKET + OS_MEM_SMALL_BUCKET_COUNT);
104 }
105
106 /* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
    /* Only large-bucket sizes carry a second-level index. */
    if ((fl < OS_MEM_SMALL_BUCKET_COUNT) || (size < OS_MEM_SMALL_BUCKET_MAX_SIZE)) {
        PRINT_ERR("fl or size is too small, fl = %u, size = %u\n", fl, size);
        return 0;
    }

    /* (size << SLI) >> log2(size) equals 2^SLI + sub-index; drop the 2^SLI base. */
    UINT32 sl = (size << OS_MEM_SLI) >> (fl - OS_MEM_SMALL_BUCKET_COUNT + OS_MEM_LARGE_START_BUCKET);
    return (sl - (1 << OS_MEM_SLI));
}
117
118 /* The following is the memory algorithm related macro definition and interface implementation. */
119 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1 && (LOSCFG_BASE_CORE_TSK_LIMIT + 1) > 64)
#error "When entering here, LOSCFG_BASE_CORE_TSK_LIMIT larger than 63 is not supported"
121 #endif
122
/* Common header shared by every memory node, used or free. */
struct OsMemNodeHead {
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    UINT32 magic;                               /* Integrity canary, OS_MEM_NODE_MAGIC when valid */
#endif
#if (LOSCFG_MEM_LEAKCHECK == 1)
    UINTPTR linkReg[LOSCFG_MEM_RECORD_LR_CNT];  /* Caller link registers recorded for leak check */
#endif
    union {
        struct OsMemNodeHead *prev; /* The prev is used for current node points to the previous node */
        struct OsMemNodeHead *next; /* The next is used for sentinel node points to the expand node */
    } ptr;
#if (LOSCFG_TASK_MEM_USED == 1)
    UINT32 taskID;                  /* Owner task of this allocation */
    UINT32 sizeAndFlag;             /* Node size combined with status flag bits */
#elif (LOSCFG_MEM_FREE_BY_TASKID == 1)
    UINT32 taskID : 6;              /* Owner task ID, 6 bits -> task limit must be <= 63 */
    UINT32 sizeAndFlag : 26;        /* Node size plus flags packed into remaining bits */
#else
    UINT32 sizeAndFlag;             /* Node size combined with status flag bits */
#endif
};
144
/* Header of an allocated node; the user payload begins right after it. */
struct OsMemUsedNodeHead {
    struct OsMemNodeHead header;
};
148
/* Header of a free node: common header plus doubly-linked free-list links. */
struct OsMemFreeNodeHead {
    struct OsMemNodeHead header;
    struct OsMemFreeNodeHead *prev;  /* Previous node in the same free list */
    struct OsMemFreeNodeHead *next;  /* Next node in the same free list */
};
154
/* Descriptive information for one memory pool. */
struct OsMemPoolInfo {
    VOID *pool;            /* Start address of the pool */
    UINT32 totalSize;      /* Total pool size in bytes, including the pool head */
    UINT32 attr;           /* Attribute bits: OS_MEM_POOL_EXPAND_ENABLE / OS_MEM_POOL_UNLOCK_ENABLE */
#if (LOSCFG_MEM_WATERLINE == 1)
    UINT32 waterLine;   /* Maximum usage size in a memory pool */
    UINT32 curUsedSize; /* Current usage size in a memory pool */
#endif
#if (LOSCFG_MEM_MUL_REGIONS == 1)
    UINT32 totalGapSize;   /* Sum of gap-node sizes between discontinuous regions */
#endif
};
167
/* Pool control block placed at the very start of the pool memory. */
struct OsMemPoolHead {
    struct OsMemPoolInfo info;
    UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];                 /* One bit per free list: 1 = non-empty */
    struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT]; /* TLSF segregated free lists */
#if (LOSCFG_MEM_MUL_POOL == 1)
    VOID *nextPool;        /* Next pool in the global multi-pool chain */
#endif
};
176
177 /* The memory pool support expand. */
178 #define OS_MEM_POOL_EXPAND_ENABLE 0x01
179 /* The memory pool support no lock. */
180 #define OS_MEM_POOL_UNLOCK_ENABLE 0x02
181
182 #define MEM_LOCK(pool, state) do { \
183 if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
184 (state) = LOS_IntLock(); \
185 } \
186 } while (0);
187 #define MEM_UNLOCK(pool, state) do { \
188 if (!((pool)->info.attr & OS_MEM_POOL_UNLOCK_ENABLE)) { \
189 LOS_IntRestore(state); \
190 } \
191 } while (0);
192
193 #define OS_MEM_NODE_MAGIC 0xABCDDCBA
194 #if (LOSCFG_TASK_MEM_USED != 1 && LOSCFG_MEM_FREE_BY_TASKID == 1)
195 #define OS_MEM_NODE_USED_FLAG (1U << 25)
196 #define OS_MEM_NODE_ALIGNED_FLAG (1U << 24)
197 #if (LOSCFG_MEM_LEAKCHECK == 1)
198 #define OS_MEM_NODE_LEAK_FLAG (1U << 23)
199 #else
200 #define OS_MEM_NODE_LEAK_FLAG 0
201 #endif
202 #if (OS_MEM_EXPAND_ENABLE == 1)
203 #define OS_MEM_NODE_LAST_FLAG (1U << 22) /* Sentinel Node */
204 #else
205 #define OS_MEM_NODE_LAST_FLAG 0
206 #endif
207 #else
208 #define OS_MEM_NODE_USED_FLAG (1U << 31)
209 #define OS_MEM_NODE_ALIGNED_FLAG (1U << 30)
210 #if (LOSCFG_MEM_LEAKCHECK == 1)
211 #define OS_MEM_NODE_LEAK_FLAG (1U << 29)
212 #else
213 #define OS_MEM_NODE_LEAK_FLAG 0
214 #endif
215 #if (OS_MEM_EXPAND_ENABLE == 1)
216 #define OS_MEM_NODE_LAST_FLAG (1U << 28) /* Sentinel Node */
217 #else
218 #define OS_MEM_NODE_LAST_FLAG 0
219 #endif
220 #endif
221
222 #define OS_MEM_NODE_ALIGNED_AND_USED_FLAG \
223 (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LEAK_FLAG | OS_MEM_NODE_LAST_FLAG)
224
225 #define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
226 ((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
227 #define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
228 (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG)
229 #define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
230 ((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
231 #define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
232 (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG)
233 #define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
234 ((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
235
236 #define OS_MEM_GAPSIZE_USED_FLAG 0x80000000U
237 #define OS_MEM_GAPSIZE_ALIGNED_FLAG 0x40000000U
238 #define OS_MEM_GET_ALIGNED_GAPSIZE(gapsize) \
239 ((gapsize) & ~OS_MEM_GAPSIZE_ALIGNED_FLAG)
240 #define OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) \
241 ((gapsize) & OS_MEM_GAPSIZE_ALIGNED_FLAG)
242 #define OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapsize) \
243 (gapsize) = ((gapsize) | OS_MEM_GAPSIZE_ALIGNED_FLAG)
244 #define OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize) \
245 ((gapsize) & OS_MEM_GAPSIZE_USED_FLAG)
246 #define OS_MEM_GAPSIZE_CHECK(gapsize) \
247 (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapsize) && \
248 OS_MEM_GET_GAPSIZE_USED_FLAG(gapsize))
249
250 #define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
251 (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG)
252 #define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
253 ((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)
254 #define OS_MEM_NODE_GET_LEAK_FLAG(sizeAndFlag) \
255 ((sizeAndFlag) & OS_MEM_NODE_LEAK_FLAG)
256 #define OS_MEM_NODE_SET_LEAK_FLAG(sizeAndFlag) \
257 (sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LEAK_FLAG)
258
259 #define OS_MEM_ALIGN_SIZE sizeof(UINTPTR)
260 #define OS_MEM_IS_POW_TWO(value) ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
261 #define OS_MEM_ALIGN(p, alignSize) (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
262 #define OS_MEM_IS_ALIGNED(a, b) (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
263 #define OS_MEM_NODE_HEAD_SIZE sizeof(struct OsMemUsedNodeHead)
264 #define OS_MEM_MIN_POOL_SIZE (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
265 #define OS_MEM_MIN_LEFT_SIZE sizeof(struct OsMemFreeNodeHead)
266 #define OS_MEM_MIN_ALLOC_SIZE 8
267 #define OS_MEM_NEXT_NODE(node) \
268 ((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
269 #define OS_MEM_FIRST_NODE(pool) \
270 (struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
271 #define OS_MEM_END_NODE(pool, size) \
272 (struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
273 #define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
274 (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
275 #define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
276 (((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
277 #if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
278 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
279 #define OS_MEM_SET_MAGIC(node) ((node)->magic = OS_MEM_NODE_MAGIC)
280 #define OS_MEM_MAGIC_VALID(node) ((node)->magic == OS_MEM_NODE_MAGIC)
281 #else
282 #define OS_MEM_SET_MAGIC(node)
283 #define OS_MEM_MAGIC_VALID(node) TRUE
284 #endif
285
286 #if (LOSCFG_MEM_MUL_REGIONS == 1)
287 /**
288 * When LOSCFG_MEM_MUL_REGIONS is enabled to support multiple non-continuous memory regions,
289 * the gap between two memory regions is marked as a used OsMemNodeHead node. The gap node
290 * couldn't be freed, and would also be skipped in some DFX functions. The 'ptr.prev' pointer
291 * of this node is set to OS_MEM_GAP_NODE_MAGIC to identify that this is a gap node.
292 */
293 #define OS_MEM_GAP_NODE_MAGIC 0xDCBAABCD
294 #define OS_MEM_MARK_GAP_NODE(node) \
295 (((struct OsMemNodeHead *)(node))->ptr.prev = (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
296 #define OS_MEM_IS_GAP_NODE(node) \
297 (((struct OsMemNodeHead *)(node))->ptr.prev == (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
298 #else
299 #define OS_MEM_MARK_GAP_NODE(node)
300 #define OS_MEM_IS_GAP_NODE(node) FALSE
301 #endif
302
303 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
304 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
305 STATIC VOID OsMemInfoPrint(VOID *pool);
306
307 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
OsMemNodeSetTaskID(struct OsMemUsedNodeHead * node)308 STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
309 {
310 node->header.taskID = LOS_CurTaskIDGet();
311 }
312 #endif
/*
 * Visit every node in the pool (including expanded regions when expansion is
 * enabled) and invoke handle(node, arg) on each, under the pool lock.
 * The pool is integrity-checked first; on failure nothing is traversed.
 */
STATIC VOID OsAllMemNodeDoHandle(VOID *pool, VOID (*handle)(struct OsMemNodeHead *curNode, VOID *arg), VOID *arg)
{
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *tmpNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
    UINT32 intSave = 0;

    if (pool == NULL) {
        PRINTK("input param is NULL\n");
        return;
    }
    if (LOS_MemIntegrityCheck(pool)) {
        PRINTK("LOS_MemIntegrityCheck error\n");
        return;
    }

    MEM_LOCK(poolInfo, intSave);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
    /* Walk physically-adjacent nodes; the end node is a sentinel, not a real node. */
    for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        if (tmpNode == endNode) {
#if OS_MEM_EXPAND_ENABLE
            /* A non-final sentinel links to an expanded region: jump there and continue. */
            UINT32 size;
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
                tmpNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(tmpNode, size);
                continue;
            }
#endif
            break;
        }
        handle(tmpNode, arg);
    }
    MEM_UNLOCK(poolInfo, intSave);
}
348
349 #if (LOSCFG_TASK_MEM_USED == 1)
/*
 * Per-node callback: accumulate sizes of used nodes into a per-task buffer.
 * arg packs two UINT32s: [0] = address of the output buffer, [1] = entry count.
 */
STATIC VOID GetTaskMemUsedHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
    UINT32 *args = (UINT32 *)arg;
    UINT32 *tskMemInfoBuf = (UINT32 *)(UINTPTR)*args;
    UINT32 tskMemInfoCnt = *(args + 1);
/* NOTE(review): this uses #ifndef while the rest of the file tests
 * (LOSCFG_MEM_MUL_REGIONS == 1). Harmless, since OS_MEM_IS_GAP_NODE expands
 * to FALSE when multi-region support is off, but worth unifying. */
#ifndef LOSCFG_MEM_MUL_REGIONS
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
#else
    if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(curNode)) {
#endif
        /* Skip nodes whose owner falls outside the caller-supplied buffer. */
        if (curNode->taskID < tskMemInfoCnt) {
            tskMemInfoBuf[curNode->taskID] += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
        }
    }
    return;
}
366
367 VOID OsTaskMemUsed(VOID *pool, UINT32 *tskMemInfoBuf, UINT32 tskMemInfoCnt)
368 {
369 UINT32 args[2] = {(UINT32)(UINTPTR)tskMemInfoBuf, tskMemInfoCnt};
370 OsAllMemNodeDoHandle(pool, GetTaskMemUsedHandle, (VOID *)args);
371 return;
372 }
373 #endif
374
375 #if (LOSCFG_MEM_WATERLINE == 1)
376 STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
377 {
378 pool->info.curUsedSize += size;
379 if (pool->info.curUsedSize > pool->info.waterLine) {
380 pool->info.waterLine = pool->info.curUsedSize;
381 }
382 }
383 #else
/* Waterline statistics disabled: no-op stub keeping call sites unconditional. */
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    (VOID)pool;
    (VOID)size;
}
389 #endif
390
391 #if OS_MEM_EXPAND_ENABLE
/* Follow the sentinel chain to its final sentinel (NULL if the chain is empty). */
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
    struct OsMemNodeHead *node = NULL;
    VOID *ptr = sentinelNode->ptr.next;
    UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);

    /* Each sentinel stores the size of the region it points to; the end node
     * of that region is the next sentinel in the chain. */
    while ((ptr != NULL) && (size != 0)) {
        node = OS_MEM_END_NODE(ptr, size);
        ptr = node->ptr.next;
        size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    }

    return node;
}
406
407 STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
408 {
409 if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
410 return FALSE;
411 }
412
413 if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
414 return FALSE;
415 }
416
417 return TRUE;
418 }
419
420 STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
421 {
422 if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
423 PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
424 return TRUE;
425 }
426
427 if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
428 (sentinelNode->ptr.next == NULL)) {
429 return TRUE;
430 }
431
432 return FALSE;
433 }
434
/* Record an expanded region (newNode, size) in the chain's last sentinel node. */
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
    /* If this sentinel already links onward, append at the end of the chain. */
    if (sentinelNode->ptr.next != NULL) {
        sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
    }

    sentinelNode->sizeAndFlag = size;
    sentinelNode->ptr.next = newNode;
    /* Mark as used + last so the sentinel is never treated as allocatable. */
    OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
    OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
}
446
447 STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
448 {
449 if (OsMemSentinelNodeCheck(node) == FALSE) {
450 return NULL;
451 }
452
453 return node->ptr.next;
454 }
455
/* Find the sentinel whose 'next' points at the given expanded-region node. */
STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
{
    UINT32 nextSize;
    struct OsMemNodeHead *nextNode = NULL;
    struct OsMemNodeHead *sentinelNode = NULL;

    /* Chain starts at the main pool's end node. */
    sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
    while (sentinelNode != NULL) {
        if (OsMemIsLastSentinelNode(sentinelNode)) {
            PRINT_ERR("PreSentinelNodeGet can not find node 0x%x\n", node);
            return NULL;
        }
        nextNode = OsMemSentinelNodeGet(sentinelNode);
        if (nextNode == node) {
            return sentinelNode;
        }
        /* Hop to the sentinel at the end of the region just visited. */
        nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
        sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
    }

    return NULL;
}
478
/*
 * Release an entire expanded region back to the page allocator when the node
 * being freed covers the whole region. Returns TRUE if the region was released.
 */
STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *mySentinel = NULL;
    struct OsMemNodeHead *preSentinel = NULL;
    /* For a region's first node, ptr.prev points at the region's own sentinel,
     * so this difference is the region's usable span. */
    size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
    size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);

    /* Only shrink when the freed node occupies the full region. */
    if (nodeSize != totalSize) {
        return FALSE;
    }

    preSentinel = PreSentinelNodeGet(pool, node);
    if (preSentinel == NULL) {
        return FALSE;
    }

    mySentinel = node->ptr.prev;
    if (OsMemIsLastSentinelNode(mySentinel)) { /* prev node becomes sentinel node */
        preSentinel->ptr.next = NULL;
        OsMemSentinelNodeSet(preSentinel, NULL, 0);
    } else {
        /* Unlink this region: the previous sentinel inherits this one's link. */
        preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
        preSentinel->ptr.next = mySentinel->ptr.next;
    }

    if (OsMemLargeNodeFree(node) != LOS_OK) {
        PRINT_ERR("TryShrinkPool free 0x%x failed!\n", node);
        return FALSE;
    }

    return TRUE;
}
511
/*
 * Grow the pool by allocating contiguous pages and linking them in via the
 * sentinel chain. Returns 0 on success, -1 on allocation failure.
 * NOTE: the pool lock is dropped and retaken around OsTryShrinkMemory, so
 * callers must not assume pool state is unchanged across this call.
 */
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 size, UINT32 intSave)
{
    UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    /* Reserve room for the region's own end node and round to whole pages. */
    size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);

RETRY:
    newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
    if (newNode == NULL) {
        if (tryCount > 0) {
            tryCount--;
            /* Release the lock while asking the system to reclaim page cache. */
            MEM_UNLOCK(poolInfo, intSave);
            OsTryShrinkMemory(size >> PAGE_SHIFT);
            MEM_LOCK(poolInfo, intSave);
            goto RETRY;
        }

        PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
        return -1;
    }
    /* New region: one big free node whose prev points at the region's end node. */
    newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
    OsMemSentinelNodeSet(endNode, newNode, size);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* Initialize the new region's end node as the chain's new last sentinel. */
    endNode = OS_MEM_END_NODE(newNode, size);
    (VOID)memset(endNode, 0, sizeof(*endNode));
    endNode->ptr.next = NULL;
    OS_MEM_SET_MAGIC(endNode);
    OsMemSentinelNodeSet(endNode, NULL, 0);
    OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);

    return 0;
}
550
551 VOID LOS_MemExpandEnable(VOID *pool)
552 {
553 if (pool == NULL) {
554 return;
555 }
556
557 ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
558 }
559 #endif
560
561 #ifdef LOSCFG_KERNEL_LMS
/* Paint LMS shadow memory for a freshly initialized pool: the pool head is
 * painted, node headers become redzone, and the first node's body after-free. */
STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
{
    if (g_lms == NULL) {
        return;
    }

    g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
    g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
                      LMS_SHADOW_REDZONE_U8);
    g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
                      LMS_SHADOW_AFTERFREE_U8);
}
575
/* Repaint shadow memory after an aligned allocation: the gap between the raw
 * pointer and the aligned pointer becomes redzone (first word keeps the gap
 * size and stays painted), as does any tail past the requested size. */
STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
{
    struct OsMemNodeHead *allocNode = NULL;

    if ((g_lms == NULL) || (ptr == NULL)) {
        return;
    }
    allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
    if (ptr != alignedPtr) {
        g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
        g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
    }

    /* mark remaining as redzone */
    g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
                      LMS_SHADOW_REDZONE_U8);
}
593
594 STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
595 {
596 if (g_lms == NULL) {
597 return;
598 }
599
600 g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
601 LMS_SHADOW_ACCESSABLE_U8);
602 }
603
604 STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
605 {
606 if (g_lms == NULL) {
607 return;
608 }
609 /* mark next node */
610 g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
611 (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
612 g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
613 (UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
614 }
615
616 STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
617 {
618 if (g_lms == NULL) {
619 return;
620 }
621 /* mark remaining as redzone */
622 g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
623 }
624 #endif
625
626 #if (LOSCFG_MEM_LEAKCHECK == 1)
/* One leak-check record: the suspect node and a copy of its caller backtrace. */
struct OsMemLeakCheckInfo {
    struct OsMemNodeHead *node;                 /* Node already reported once */
    UINTPTR linkReg[LOSCFG_MEM_RECORD_LR_CNT];  /* Snapshot of the node's link registers */
};
631
632 struct OsMemLeakCheckInfo g_leakCheckRecord[LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM] = {0};
633 STATIC UINT32 g_leakCheckRecordCnt = 0;
634
/* Record a node in the leak-check ring buffer, at most once per node
 * (the leak flag marks nodes already recorded). */
STATIC INLINE VOID OsMemLeakCheckInfoRecord(struct OsMemNodeHead *node)
{
    struct OsMemLeakCheckInfo *info = &g_leakCheckRecord[g_leakCheckRecordCnt];

    if (!OS_MEM_NODE_GET_LEAK_FLAG(node->sizeAndFlag)) {
        info->node = node;
        (VOID)memcpy(info->linkReg, node->linkReg, sizeof(node->linkReg));
        OS_MEM_NODE_SET_LEAK_FLAG(node->sizeAndFlag);
        /* Ring buffer: wrap the write index when the table is full. */
        g_leakCheckRecordCnt++;
        if (g_leakCheckRecordCnt >= LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM) {
            g_leakCheckRecordCnt = 0;
        }
    }
}
649
650 STATIC INLINE VOID OsMemLeakCheckInit(VOID)
651 {
652 (VOID)memset(g_leakCheckRecord, 0, sizeof(struct OsMemLeakCheckInfo) * LOSCFG_MEM_LEAKCHECK_RECORD_MAX_NUM);
653 g_leakCheckRecordCnt = 0;
654 }
655
/* Capture the current call chain into the node's linkReg for leak reporting. */
STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
{
    (VOID)memset(node->linkReg, 0, sizeof(node->linkReg));
    OsBackTraceHookCall(node->linkReg, LOSCFG_MEM_RECORD_LR_CNT, LOSCFG_MEM_OMIT_LR_CNT, 0);
}
661
/* Print one used node (address, size, backtrace) and record it for leak check;
 * free nodes and inter-region gap nodes are skipped. */
STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
{
    UINT32 count;

    if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(node)) {
        PRINTK("0x%x: 0x%x ", (UINTPTR)node, OS_MEM_NODE_GET_SIZE(node->sizeAndFlag));
        for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
            PRINTK(" 0x%x ", node->linkReg[count]);
        }
        PRINTK("\n");

        OsMemLeakCheckInfoRecord(node);
    }
}
676
677 STATIC VOID OsMemUsedNodePrintHandle(struct OsMemNodeHead *node, VOID *arg)
678 {
679 UNUSED(arg);
680 OsMemUsedNodePrint(node);
681 return;
682 }
683
/* Dump every used node of the pool with its recorded backtrace, resetting the
 * leak-check records first so this dump starts a fresh reporting cycle. */
VOID LOS_MemUsedNodeShow(VOID *pool)
{
    UINT32 count;

    /* Column header: node address, size, then one column per recorded LR. */
    PRINTK("\n\rnode        size    ");
    for (count = 0; count < LOSCFG_MEM_RECORD_LR_CNT; count++) {
        PRINTK("    LR[%u]   ", count);
    }
    PRINTK("\n");

    OsMemLeakCheckInit();
    OsAllMemNodeDoHandle(pool, OsMemUsedNodePrintHandle, NULL);
    return;
}
698
699 #if (LOSCFG_KERNEL_PRINTF != 0)
700 STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
701 const struct OsMemNodeHead *preNode)
702 {
703 int i;
704 PRINTK("\n broken node head LR info: \n");
705 for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
706 PRINTK(" LR[%d]:0x%x\n", i, tmpNode->linkReg[i]);
707 }
708
709 PRINTK("\n pre node head LR info: \n");
710 for (i = 0; i < LOSCFG_MEM_RECORD_LR_CNT; i++) {
711 PRINTK(" LR[%d]:0x%x\n", i, preNode->linkReg[i]);
712 }
713 }
714 #endif
715 #endif
716
717 STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
718 {
719 UINT32 fl = OsMemFlGet(size);
720 if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
721 return fl;
722 }
723
724 UINT32 sl = OsMemSlGet(size, fl);
725 return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl);
726 }
727
728 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
729 UINT32 index, UINT32 size)
730 {
731 struct OsMemFreeNodeHead *node = NULL;
732
733 for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
734 if (node->header.sizeAndFlag >= size) {
735 return node;
736 }
737 }
738
739 return NULL;
740 }
741
/* Starting from 'index', return the first non-empty free-list index within the
 * same 32-bit bitmap word, or OS_MEM_FREE_LIST_COUNT if that word has none. */
STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
{
    /* 5: Divide by 32 to calculate the index of the bitmap array. */
    UINT32 mask = poolHead->freeListBitmap[index >> 5];
    /* Drop bits below 'index' so only lists at or above it are considered. */
    mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
    if (mask != 0) {
        /* Rebase the in-word bit position onto the word's absolute start index. */
        index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
        return index;
    }

    return OS_MEM_FREE_LIST_COUNT;
}
754
/*
 * Good-fit search: find a free block big enough for 'size'.
 * First tries lists strictly above the exact-fit list (guaranteed large
 * enough), then falls back to scanning the exact-fit list itself.
 * On success *outIndex is set to the list the returned block lives in.
 */
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 fl = OsMemFlGet(size);
    UINT32 sl;
    UINT32 index, tmp;
    UINT32 curIndex = OS_MEM_FREE_LIST_COUNT;
    UINT32 mask;

    do {
        if (fl < OS_MEM_SMALL_BUCKET_COUNT) {
            /* Small buckets are exact-sized, so the fl list itself fits. */
            index = fl;
        } else {
            sl = OsMemSlGet(size, fl);
            curIndex = ((fl - OS_MEM_SMALL_BUCKET_COUNT) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
            /* Start above the exact-fit list: those blocks are always >= size. */
            index = curIndex + 1;
        }

        tmp = OsMemNotEmptyIndexGet(poolHead, index);
        if (tmp != OS_MEM_FREE_LIST_COUNT) {
            index = tmp;
            goto DONE;
        }

        /* Scan the remaining bitmap words, one 32-list word at a time. */
        for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) {
            /* 5: Divide by 32 to calculate the index of the bitmap array. */
            mask = poolHead->freeListBitmap[index >> 5];
            if (mask != 0) {
                index = OsMemFFS(mask) + index;
                goto DONE;
            }
        }
    } while (0);

    if (curIndex == OS_MEM_FREE_LIST_COUNT) {
        return NULL;
    }

    /* Fall back to the exact-fit list, which may still hold a fitting block. */
    *outIndex = curIndex;
    return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
DONE:
    *outIndex = index;
    return poolHead->freeList[index];
}
799
800 STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
801 {
802 /* 5: Divide by 32 to calculate the index of the bitmap array. */
803 head->freeListBitmap[index >> 5] |= 1U << (index & 0x1f);
804 }
805
806 STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
807 {
808 /* 5: Divide by 32 to calculate the index of the bitmap array. */
809 head->freeListBitmap[index >> 5] &= ~(1U << (index & 0x1f));
810 }
811
812 STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
813 {
814 struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
815 if (firstNode != NULL) {
816 firstNode->prev = node;
817 }
818 node->prev = NULL;
819 node->next = firstNode;
820 pool->freeList[listIndex] = node;
821 OsMemSetFreeListBit(pool, listIndex);
822 OS_MEM_SET_MAGIC(&node->header);
823 }
824
825 STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
826 {
827 if (node == pool->freeList[listIndex]) {
828 pool->freeList[listIndex] = node->next;
829 if (node->next == NULL) {
830 OsMemClearFreeListBit(pool, listIndex);
831 } else {
832 node->next->prev = NULL;
833 }
834 } else {
835 node->prev->next = node->next;
836 if (node->next != NULL) {
837 node->next->prev = node->prev;
838 }
839 }
840 OS_MEM_SET_MAGIC(&node->header);
841 }
842
843 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
844 {
845 UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
846 if (index >= OS_MEM_FREE_LIST_COUNT) {
847 LOS_Panic("The index of free lists is error, index = %u\n", index);
848 }
849 OsMemListAdd(pool, index, node);
850 }
851
852 STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
853 {
854 UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
855 OsMemListDelete(pool, index, node);
856 }
857
858 STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
859 {
860 struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
861 UINT32 index;
862 struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
863 if (firstNode == NULL) {
864 return NULL;
865 }
866
867 OsMemListDelete(poolHead, index, firstNode);
868
869 return &firstNode->header;
870 }
871
/* Merge 'node' into its physically previous node (caller must already have
 * removed both from the free lists as needed). */
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *nextNode = NULL;

    node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
    nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
    /* Rewire the following node's back pointer unless it is a sentinel or
     * an inter-region gap node, which must keep their special prev values. */
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = node->ptr.prev;
    }
}
882
/* Split 'allocNode' at 'allocSize'; the remainder becomes a free node that is
 * coalesced with the following node when that node is also free. */
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
    struct OsMemFreeNodeHead *newFreeNode = NULL;
    struct OsMemNodeHead *nextNode = NULL;

    newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
    newFreeNode->header.ptr.prev = allocNode;
    newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
    allocNode->sizeAndFlag = allocSize;
    nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
    /* Sentinel and gap nodes keep their special prev; skip rewiring them. */
    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
        nextNode->ptr.prev = &newFreeNode->header;
        if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
            /* Next node is free: absorb it into the new remainder. */
            OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
            OsMemMergeNode(nextNode);
        }
    }

    OsMemFreeNodeAdd(pool, newFreeNode);
}
903
/* Finalize a node handed out by the allocator (owner task ID, LMS shadow
 * marking) and return the user payload address just past the header. */
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
    struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;

#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
    OsMemNodeSetTaskID(node);
#endif

#ifdef LOSCFG_KERNEL_LMS
    struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
    if (g_lms != NULL) {
        g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
    }
#endif
    /* Payload begins immediately after the used-node header. */
    return node + 1;
}
920
/* Initialize a memory pool at 'pool' of 'size' bytes: set up the pool head,
 * one big free node covering the whole pool, and the end sentinel node. */
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(pool, size);
        size = (resize == 0) ? size : resize;
    }
#endif
    (VOID)memset(poolHead, 0, sizeof(struct OsMemPoolHead));

    poolHead->info.pool = pool;
    poolHead->info.totalSize = size;
    /* default attr: lock, not expand. */
    poolHead->info.attr &= ~(OS_MEM_POOL_UNLOCK_ENABLE | OS_MEM_POOL_EXPAND_ENABLE);

    /* One free node spans everything between the pool head and the end node. */
    newNode = OS_MEM_FIRST_NODE(pool);
    newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
    newNode->ptr.prev = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(newNode);
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    /* The last mem node */
    endNode = OS_MEM_END_NODE(pool, size);
    OS_MEM_SET_MAGIC(endNode);
#if OS_MEM_EXPAND_ENABLE
    /* Expansion on: the end node starts an (empty) sentinel chain. */
    endNode->ptr.next = NULL;
    OsMemSentinelNodeSet(endNode, NULL, 0);
#else
    /* Expansion off: a plain zero-size used node terminates the pool. */
    endNode->sizeAndFlag = 0;
    endNode->ptr.prev = newNode;
    OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
#if (LOSCFG_MEM_WATERLINE == 1)
    /* Head and end node are permanent overhead, counted as used from the start. */
    poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif

#ifdef LOSCFG_KERNEL_LMS
    if (resize != 0) {
        OsLmsFirstNodeMark(pool, newNode);
    }
#endif
    return LOS_OK;
}
973
974 #if (LOSCFG_MEM_MUL_POOL == 1)
975 STATIC VOID OsMemPoolDeinit(VOID *pool)
976 {
977 (VOID)memset(pool, 0, sizeof(struct OsMemPoolHead));
978 }
979
/*
 * Append @pool to the global singly linked pool list after verifying that
 * the new range [pool, pool + size) does not overlap any registered pool.
 * Returns LOS_NOK on address-range conflict, LOS_OK otherwise.
 */
STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
{
    VOID *nextPool = g_poolHead;
    VOID *curPool = g_poolHead;   /* trails nextPool so we keep the list tail */
    UINTPTR poolEnd;
    while (nextPool != NULL) {
        poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
        /* Conflict if the new range starts before an existing pool but runs
         * into it, or starts inside an existing pool's range. */
        if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
            (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
            PRINT_ERR("pool [0x%x, 0x%x) conflict with pool [0x%x, 0x%x)\n", (UINTPTR)pool,
                      (UINTPTR)pool + size, (UINTPTR)nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
            return LOS_NOK;
        }
        curPool = nextPool;
        nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
    }

    if (g_poolHead == NULL) {
        g_poolHead = pool;
    } else {
        /* curPool is the last pool in the list at this point. */
        ((struct OsMemPoolHead *)curPool)->nextPool = pool;
    }

    ((struct OsMemPoolHead *)pool)->nextPool = NULL;
    return LOS_OK;
}
1006
1007 STATIC UINT32 OsMemPoolDelete(VOID *pool)
1008 {
1009 UINT32 ret = LOS_NOK;
1010 VOID *nextPool = NULL;
1011 VOID *curPool = NULL;
1012
1013 do {
1014 if (pool == g_poolHead) {
1015 g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
1016 ret = LOS_OK;
1017 break;
1018 }
1019
1020 curPool = g_poolHead;
1021 nextPool = g_poolHead;
1022 while (nextPool != NULL) {
1023 if (pool == nextPool) {
1024 ((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1025 ret = LOS_OK;
1026 break;
1027 }
1028 curPool = nextPool;
1029 nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1030 }
1031 } while (0);
1032
1033 return ret;
1034 }
1035 #endif
1036
/*
 * Public API: initialize a dynamic memory pool over [pool, pool + size).
 * Both the base address and the size must be OS_MEM_ALIGN_SIZE aligned and
 * the size must exceed the minimum pool size. With multi-pool support the
 * pool is also registered globally, and rolled back if registration fails.
 * Returns LOS_OK on success, LOS_NOK on any validation/registration error.
 */
UINT32 LOS_MemInit(VOID *pool, UINT32 size)
{
    if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
        return LOS_NOK;
    }

    if (((UINTPTR)pool & (OS_MEM_ALIGN_SIZE - 1)) || \
        (size & (OS_MEM_ALIGN_SIZE - 1))) {
        PRINT_ERR("LiteOS heap memory address or size configured not aligned:address:0x%x,size:0x%x, alignsize:%d\n", \
                  (UINTPTR)pool, size, OS_MEM_ALIGN_SIZE);
        return LOS_NOK;
    }

    if (OsMemPoolInit(pool, size)) {
        return LOS_NOK;
    }

#if (LOSCFG_MEM_MUL_POOL == 1)
    /* Registration can fail on range conflict; undo the pool header. */
    if (OsMemPoolAdd(pool, size)) {
        (VOID)OsMemPoolDeinit(pool);
        return LOS_NOK;
    }
#endif

    OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);

    return LOS_OK;
}
1065
1066 #if (LOSCFG_MEM_MUL_POOL == 1)
1067 UINT32 LOS_MemDeInit(VOID *pool)
1068 {
1069 if (pool == NULL) {
1070 return LOS_NOK;
1071 }
1072
1073 if (OsMemPoolDelete(pool)) {
1074 return LOS_NOK;
1075 }
1076
1077 OsMemPoolDeinit(pool);
1078
1079 OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, pool);
1080
1081 return LOS_OK;
1082 }
1083
1084 UINT32 LOS_MemPoolList(VOID)
1085 {
1086 VOID *nextPool = g_poolHead;
1087 UINT32 index = 0;
1088 while (nextPool != NULL) {
1089 PRINTK("pool%u :\n", index);
1090 index++;
1091 OsMemInfoPrint(nextPool);
1092 nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
1093 }
1094 return index;
1095 }
1096 #endif
1097
/*
 * Core allocation path (caller holds the pool lock). Rounds the request up
 * to an aligned node size including the header, takes a fitting node from
 * the free lists, splits off any excess, marks the node used and returns
 * the payload pointer. On exhaustion it may expand the pool or invoke the
 * low-memory killer before retrying; returns NULL if nothing helps.
 */
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *allocNode = NULL;

#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
        return NULL;
    }
#endif

    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE || (LOSCFG_KERNEL_LMK == 1)
retry:
#endif
    allocNode = OsMemFreeNodeGet(pool, allocSize);
    if (allocNode == NULL) {
#if OS_MEM_EXPAND_ENABLE
        /* Try to grow the pool if it was configured as expandable. */
        if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
            INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
            if (ret == 0) {
                goto retry;
            }
        }
#endif

#if (LOSCFG_KERNEL_LMK == 1)
        /* Last resort: reclaim memory by killing low-priority tasks. */
        UINT32 killRet = LOS_LmkTasksKill();
        if (killRet == LOS_OK) {
            goto retry;
        }
#endif
        PRINT_ERR("---------------------------------------------------"
                  "--------------------------------------------------------\n");
        /* Drop the lock while dumping pool info; it takes the lock itself. */
        MEM_UNLOCK(pool, intSave);
        OsMemInfoPrint(pool);
        MEM_LOCK(pool, intSave);
        PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
        PRINT_ERR("----------------------------------------------------"
                  "-------------------------------------------------------\n");
        return NULL;
    }

    /* Split only when the leftover is big enough to form a valid node. */
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= allocNode->sizeAndFlag) {
        OsMemSplitNode(pool, allocNode, allocSize);
    }

    OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
    OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));

#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(allocNode);
#endif
    return OsMemCreateUsedNode((VOID *)allocNode);
}
1152
1153 VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
1154 {
1155 if ((pool == NULL) || (size == 0)) {
1156 return NULL;
1157 }
1158
1159 if (size < OS_MEM_MIN_ALLOC_SIZE) {
1160 size = OS_MEM_MIN_ALLOC_SIZE;
1161 }
1162
1163 struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1164 VOID *ptr = NULL;
1165 UINT32 intSave = 0;
1166
1167 MEM_LOCK(poolHead, intSave);
1168 do {
1169 if (OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
1170 break;
1171 }
1172 ptr = OsMemAlloc(poolHead, size, intSave);
1173 } while (0);
1174 MEM_UNLOCK(poolHead, intSave);
1175
1176 OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
1177
1178 return ptr;
1179 }
1180
/*
 * Public API: allocate @size bytes aligned to @boundary (a power of two,
 * itself pointer aligned). Over-allocates by (boundary - sizeof(gapSize)),
 * aligns the payload, and when the aligned pointer differs from the raw
 * pointer stores the offset ("gap size", tagged with the aligned flag) in
 * the word just before the aligned address so LOS_MemFree can recover the
 * real node. Returns the aligned payload pointer or NULL.
 */
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
    UINT32 gapSize;

    if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
        !OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    /*
     * sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
     * the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
     * offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
     */
    if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
        /* size + boundary - sizeof(gapSize) would overflow UINT32. */
        return NULL;
    }

    UINT32 useSize = (size + boundary) - sizeof(gapSize);
    if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
        return NULL;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    UINT32 intSave = 0;
    VOID *ptr = NULL;
    VOID *alignedPtr = NULL;

    MEM_LOCK(poolHead, intSave);
    do {
        /* NULL ptr aligns to NULL, so the failure case also takes this break. */
        ptr = OsMemAlloc(pool, useSize, intSave);
        alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
        if (ptr == alignedPtr) {
#ifdef LOSCFG_KERNEL_LMS
            OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
            break;
        }

        /* store gapSize in address (ptr - 4), it will be checked while free */
        gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
        struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
        OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
        OS_MEM_SET_GAPSIZE_ALIGNED_FLAG(gapSize);
        *(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
#ifdef LOSCFG_KERNEL_LMS
        OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
        ptr = alignedPtr;
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);

    return ptr;
}
1241
/*
 * Check whether @addr lies inside the usable range of @pool — the main
 * region between the pool header and the pool end, or (when expansion is
 * enabled) any of the chained expansion regions reachable through the
 * sentinel nodes. Returns TRUE if the address is in range.
 */
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
    UINT32 size;

    size = pool->info.totalSize;
    /* pool + 1 skips the control header; range is half-open at the top. */
    if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
        return TRUE;
    }
#if OS_MEM_EXPAND_ENABLE
    /* Walk every expansion region hanging off the sentinel chain. */
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
            return TRUE;
        }
    }
#endif
    return FALSE;
}
1264
1265 STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
1266 const struct OsMemNodeHead *endNode,
1267 const struct OsMemPoolHead *poolInfo)
1268 {
1269 if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
1270 return FALSE;
1271 }
1272
1273 if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
1274 if (!OS_MEM_MAGIC_VALID(node)) {
1275 return FALSE;
1276 }
1277 return TRUE;
1278 }
1279
1280 if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
1281 return FALSE;
1282 }
1283
1284 return TRUE;
1285 }
1286
/*
 * Verify that @node is a valid in-use node belonging to @pool: in range,
 * marked used, consistently linked with its physical neighbours. When
 * expansion is enabled the check continues across every expansion region
 * until the node is found valid or the regions are exhausted.
 * Returns LOS_OK if the node checks out, LOS_NOK otherwise.
 */
STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
{
    struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
    struct OsMemNodeHead *nextNode = NULL;
    BOOL doneFlag = FALSE;

    do {
        /* Inner do/while(0): any failed check breaks to the region switch. */
        do {
            if (OS_MEM_IS_GAP_NODE(node)) {
                break;
            }

            if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
                break;
            }

            if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
                break;
            }

            nextNode = OS_MEM_NEXT_NODE(node);
            if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
                break;
            }

            /* The successor (if it has a prev link) must point back at us. */
            if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(nextNode)) {
                if (nextNode->ptr.prev != node) {
                    break;
                }
            }

            /* The predecessor must be valid and physically adjacent. */
            if ((node != startNode) &&
                ((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
                (OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
                break;
            }
            doneFlag = TRUE;
        } while (0);

        if (!doneFlag) {
#if OS_MEM_EXPAND_ENABLE
            /* Not found in this region: advance to the next expansion region. */
            if (OsMemIsLastSentinelNode(endNode) == FALSE) {
                startNode = OsMemSentinelNodeGet(endNode);
                endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
                continue;
            }
#endif
            return LOS_NOK;
        }
    } while (!doneFlag);

    return LOS_OK;
}
1341
1342 STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
1343 {
1344 UINT32 ret = OsMemCheckUsedNode(pool, node);
1345 if (ret != LOS_OK) {
1346 PRINT_ERR("OsMemFree check error!\n");
1347 return ret;
1348 }
1349
1350 #if (LOSCFG_MEM_WATERLINE == 1)
1351 pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1352 #endif
1353
1354 node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
1355 #if (LOSCFG_MEM_LEAKCHECK == 1)
1356 OsMemLinkRegisterRecord(node);
1357 #endif
1358 #ifdef LOSCFG_KERNEL_LMS
1359 struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
1360 struct OsMemNodeHead *curNodeBackup = node;
1361 if (g_lms != NULL) {
1362 g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
1363 }
1364 #endif
1365 struct OsMemNodeHead *preNode = node->ptr.prev; /* merage preNode */
1366 if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1367 OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
1368 OsMemMergeNode(node);
1369 node = preNode;
1370 }
1371
1372 struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merage nextNode */
1373 if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
1374 OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
1375 OsMemMergeNode(nextNode);
1376 }
1377
1378 #if OS_MEM_EXPAND_ENABLE
1379 if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
1380 struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
1381 /* if this is a expand head node, and all unused, free it to pmm */
1382 if ((node->prev > node) && (node != firstNode)) {
1383 if (TryShrinkPool(pool, node)) {
1384 return LOS_OK;
1385 }
1386 }
1387 }
1388 #endif
1389
1390 OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
1391 #ifdef LOSCFG_KERNEL_LMS
1392 if (g_lms != NULL) {
1393 g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
1394 }
1395 #endif
1396 return ret;
1397 }
1398
/*
 * Map a user pointer back to the real allocation start. For pointers that
 * came from LOS_MemAllocAlign the word just before @ptr holds a flagged
 * "gap size" (offset to the raw pointer); validate it and subtract it.
 * Returns the raw payload pointer, or NULL if the stored gap is corrupt.
 */
STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
{
    VOID *realPtr = ptr;
    /* Word preceding the payload: either node-header bytes or a gap tag. */
    UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));

    if (OS_MEM_GAPSIZE_CHECK(gapSize)) {
        PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
        return NULL;
    }

    if (OS_MEM_GET_GAPSIZE_ALIGNED_FLAG(gapSize)) {
        gapSize = OS_MEM_GET_ALIGNED_GAPSIZE(gapSize);
        /* The gap must be aligned and must not step back past the pool start. */
        if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
            (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
            PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
            return NULL;
        }
        realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
    }
    return realPtr;
}
1420
/*
 * Public API: return @ptr (from LOS_MemAlloc/LOS_MemAllocAlign) to @pool.
 * Resolves aligned pointers back to the real node before freeing.
 * Returns LOS_OK on success, LOS_NOK on invalid arguments or corrupt node.
 * Note: the hook fires before the free is attempted (and regardless of its
 * outcome), unlike the alloc hooks which fire after.
 */
UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
{
    if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
        !OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
        return LOS_NOK;
    }

    OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);

    UINT32 ret = LOS_NOK;
    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *node = NULL;
    UINT32 intSave = 0;

    MEM_LOCK(poolHead, intSave);
    do {
        /* Undo any alignment gap introduced by LOS_MemAllocAlign. */
        ptr = OsGetRealPtr(pool, ptr);
        if (ptr == NULL) {
            break;
        }
        /* The node header sits immediately before the payload. */
        node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
        ret = OsMemFree(poolHead, node);
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    return ret;
}
1448
/*
 * Shrink-in-place path of realloc: the existing node already holds enough
 * space. Splits off the surplus as a free node when it is large enough,
 * then re-marks the node used.
 * NOTE: the braces below are deliberately interleaved with the
 * LOSCFG_KERNEL_LMS #ifdef — with LMS enabled the `} else {` arm marks the
 * resize; without LMS the `}` after the #endif closes the if. Do not
 * re-indent or "fix" this structure without checking both configurations.
 */
STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
{
#if (LOSCFG_MEM_WATERLINE == 1)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
#endif
    /* Temporarily clear the flag bits so split arithmetic sees a raw size. */
    node->sizeAndFlag = nodeSize;
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= nodeSize) {
        OsMemSplitNode(pool, node, allocSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        poolInfo->info.curUsedSize -= nodeSize - allocSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1471
/*
 * Grow-in-place path of realloc: absorb the free @nextNode into @node (the
 * caller verified the combined size fits), split off any surplus, re-mark
 * the node used and account the growth against the waterline.
 * NOTE: as in OsMemReAllocSmaller, the `} else {` arm below exists only
 * when LOSCFG_KERNEL_LMS is defined — the brace structure is intentional.
 */
STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
                                                  UINT32 nodeSize, struct OsMemNodeHead *nextNode)
{
    /* Clear flag bits so the merge adds raw sizes. */
    node->sizeAndFlag = nodeSize;
    OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
    OsMemMergeNode(nextNode);
#ifdef LOSCFG_KERNEL_LMS
    OsLmsReallocMergeNodeMark(node);
#endif
    if ((allocSize + OS_MEM_MIN_LEFT_SIZE) <= node->sizeAndFlag) {
        OsMemSplitNode(pool, node, allocSize);
#ifdef LOSCFG_KERNEL_LMS
        OsLmsReallocSplitNodeMark(node);
    } else {
        OsLmsReallocResizeMark(node, allocSize);
#endif
    }
    OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
    /* Only the growth (new size - old size) is newly used memory. */
    OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, node->sizeAndFlag - nodeSize);
#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemLinkRegisterRecord(node);
#endif
}
1495
/*
 * Core realloc path (caller holds the pool lock). Strategy, in order:
 * 1) shrink in place if the node is already big enough;
 * 2) grow in place by merging a free physical successor;
 * 3) allocate a new node, copy the old payload, free the old node.
 * Returns the (possibly moved) payload pointer, or NULL on failure — in
 * which case the original allocation is left intact.
 */
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
                                 struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
    struct OsMemNodeHead *nextNode = NULL;
    UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
    UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
    VOID *tmpPtr = NULL;

    if (nodeSize >= allocSize) {
        OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
        return (VOID *)ptr;
    }

    nextNode = OS_MEM_NEXT_NODE(node);
    if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
        ((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
        OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
        return (VOID *)ptr;
    }

    tmpPtr = OsMemAlloc(pool, size, intSave);
    if (tmpPtr != NULL) {
        if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
            /* LOS_MemFree takes the lock itself; release it around the call. */
            MEM_UNLOCK(pool, intSave);
            (VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
            MEM_LOCK(pool, intSave);
            return NULL;
        }
        (VOID)OsMemFree(pool, node);
    }
    return tmpPtr;
}
1528
/*
 * Public API: resize the allocation at @ptr to @size bytes, preserving its
 * contents. Follows the C realloc contract: NULL ptr behaves like alloc,
 * size 0 behaves like free (returning NULL). Returns the new payload
 * pointer, or NULL on failure (original block untouched unless size == 0).
 */
VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
{
    if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
        return NULL;
    }

    OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);

    if (ptr == NULL) {
        return LOS_MemAlloc(pool, size);
    }

    if (size == 0) {
        (VOID)LOS_MemFree(pool, ptr);
        return NULL;
    }

    if (size < OS_MEM_MIN_ALLOC_SIZE) {
        size = OS_MEM_MIN_ALLOC_SIZE;
    }

    struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
    struct OsMemNodeHead *node = NULL;
    VOID *newPtr = NULL;
    UINT32 intSave = 0;

    MEM_LOCK(poolHead, intSave);
    do {
        /* Resolve aligned pointers to the real allocation start. */
        ptr = OsGetRealPtr(pool, ptr);
        if (ptr == NULL) {
            break;
        }

        node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
        if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
            break;
        }

        newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
    } while (0);
    MEM_UNLOCK(poolHead, intSave);

    return newPtr;
}
1573
1574 #if (LOSCFG_MEM_FREE_BY_TASKID == 1)
1575 STATIC VOID MemNodeFreeByTaskIDHandle(struct OsMemNodeHead *curNode, VOID *arg)
1576 {
1577 UINT32 *args = (UINT32 *)arg;
1578 UINT32 taskID = *args;
1579 struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)(UINTPTR)(*(args + 1));
1580 struct OsMemUsedNodeHead *node = NULL;
1581 if (!OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1582 return;
1583 }
1584
1585 node = (struct OsMemUsedNodeHead *)curNode;
1586 if (node->header.taskID == taskID) {
1587 OsMemFree(poolHead, &node->header);
1588 }
1589 return;
1590 }
1591
1592 UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
1593 {
1594 UINT32 args[2] = { taskID, (UINT32)(UINTPTR)pool };
1595 if (pool == NULL) {
1596 return LOS_NOK;
1597 }
1598
1599 if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1600 return LOS_NOK;
1601 }
1602
1603 OsAllMemNodeDoHandle(pool, MemNodeFreeByTaskIDHandle, (VOID *)args);
1604
1605 return LOS_OK;
1606 }
1607 #endif
1608
/*
 * Public API: total size of @pool in bytes, including all expansion regions
 * (and excluding multi-region gap overhead when configured).
 * Returns LOS_NOK for a NULL pool — callers must not treat that value as a
 * size.
 */
UINT32 LOS_MemPoolSizeGet(const VOID *pool)
{
    UINT32 count = 0;

    if (pool == NULL) {
        return LOS_NOK;
    }

    count += ((struct OsMemPoolHead *)pool)->info.totalSize;
#if (LOSCFG_MEM_MUL_REGIONS == 1)
    count -= ((struct OsMemPoolHead *)pool)->info.totalGapSize;
#endif

#if OS_MEM_EXPAND_ENABLE
    /* Add every expansion region chained through the sentinel nodes. */
    UINT32 size;
    struct OsMemNodeHead *node = NULL;
    struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);

    while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
        size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
        node = OsMemSentinelNodeGet(sentinel);
        sentinel = OS_MEM_END_NODE(node, size);
        count += size;
    }
#endif
    return count;
}
1636
1637 STATIC VOID MemUsedGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1638 {
1639 UINT32 *memUsed = (UINT32 *)arg;
1640 if (OS_MEM_IS_GAP_NODE(curNode)) {
1641 *memUsed += OS_MEM_NODE_HEAD_SIZE;
1642 } else if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
1643 *memUsed += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
1644 }
1645 return;
1646 }
1647
1648 UINT32 LOS_MemTotalUsedGet(VOID *pool)
1649 {
1650 UINT32 memUsed = 0;
1651
1652 if (pool == NULL) {
1653 return LOS_NOK;
1654 }
1655
1656 OsAllMemNodeDoHandle(pool, MemUsedGetHandle, (VOID *)&memUsed);
1657
1658 return memUsed;
1659 }
1660
/*
 * Report a corrupted magic number on a used node. The magic field only
 * exists when integrity checking is configured in; otherwise this is a
 * no-op that just consumes the argument.
 */
STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
{
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
    PRINT_ERR("[%s], %d, memory check error!\n"
              "memory used but magic num wrong, magic num = 0x%x\n",
              __FUNCTION__, __LINE__, (*tmpNode)->magic);
#else
    (VOID)tmpNode;
#endif
}
1671
/*
 * Validate a free node's list links against the pool range, logging the
 * first offender. NULL links are legal (list ends). Returns LOS_NOK on the
 * first out-of-range link, LOS_OK when both are acceptable.
 */
STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
{
    if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
        PRINT_ERR("[%s], %d, memory check error!\n"
                  " freeNode.prev: %p is out of legal mem range\n",
                  __FUNCTION__, __LINE__, (*tmpNode)->prev);
        return LOS_NOK;
    }
    if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
        PRINT_ERR("[%s], %d, memory check error!\n"
                  " freeNode.next: %p is out of legal mem range\n",
                  __FUNCTION__, __LINE__, (*tmpNode)->next);
        return LOS_NOK;
    }
    return LOS_OK;
}
1688
1689 STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool)
1690 {
1691 if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
1692 OsMemMagicCheckPrint(tmpNode);
1693 return LOS_NOK;
1694 }
1695
1696 if (!OsMemAddrValidCheck(pool, (*tmpNode)->ptr.prev)) {
1697 PRINT_ERR("[%s], %d, memory check error!\n"
1698 " node prev: %p is out of legal mem range\n",
1699 __FUNCTION__, __LINE__, (*tmpNode)->ptr.next);
1700 return LOS_NOK;
1701 }
1702
1703 if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* is free node, check free node range */
1704 if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
1705 return LOS_NOK;
1706 }
1707 }
1708
1709 return LOS_OK;
1710 }
1711
1712 STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
1713 const struct OsMemFreeNodeHead *node)
1714 {
1715 if (!OsMemAddrValidCheck(pool, node) ||
1716 ((node->prev != NULL) && !OsMemAddrValidCheck(pool, node->prev)) ||
1717 ((node->next != NULL) && !OsMemAddrValidCheck(pool, node->next)) ||
1718 !OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
1719 return LOS_NOK;
1720 }
1721
1722 if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
1723 !OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
1724 !OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
1725 !OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
1726 return LOS_NOK;
1727 }
1728
1729 return LOS_OK;
1730 }
1731
/*
 * Diagnostic pass over the pool header: verify the header's self pointer
 * and alignment, then walk every free list and report nodes whose links
 * fail OsMemFreeListNodeCheck. On any failure the pool (and its expansion
 * regions) is summarized for debugging. Print-only; never modifies state.
 */
STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
{
    struct OsMemFreeNodeHead *tmpNode = NULL;
    UINT32 index;
    UINT32 flag = 0; /* set once any free-list node fails its check */

    if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
        PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
        return;
    }

    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
            if (OsMemFreeListNodeCheck(pool, tmpNode)) {
                flag = 1;
                PRINT_ERR("FreeListIndex: %u, node: %p, bNode: %p, prev:%p, next: %p\n",
                          index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
            }
        }
    }

    if (flag) {
        PRINTK("mem pool info: poolAddr: %p, poolSize: 0x%x\n", pool, pool->info.totalSize);
#if (LOSCFG_MEM_WATERLINE == 1)
        PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
               pool->info.curUsedSize);
#endif
#if OS_MEM_EXPAND_ENABLE
        /* Also dump each expansion region reachable from the sentinel chain. */
        UINT32 size;
        struct OsMemNodeHead *node = NULL;
        struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
        while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
            size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
            node = OsMemSentinelNodeGet(sentinel);
            sentinel = OS_MEM_END_NODE(node, size);
            PRINTK("expand node info: nodeAddr: 0x%x, nodeSize: 0x%x\n", node, size);
        }
#endif
    }
}
1772
/*
 * Full integrity sweep: check the pool header, then walk every node in the
 * main region and all expansion regions, validating each with
 * OsMemIntegrityCheckSub. On failure *tmpNode holds the broken node and
 * *preNode the last node that passed; returns LOS_NOK. Returns LOS_OK when
 * the whole pool is consistent. Caller must hold the pool lock.
 */
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
                                  struct OsMemNodeHead **preNode)
{
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    OsMemPoolHeadCheck(pool);

    *preNode = OS_MEM_FIRST_NODE(pool);
    do {
        for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
            /* Gap nodes carry no validatable header fields. */
            if (OS_MEM_IS_GAP_NODE(*tmpNode)) {
                continue;
            }
            if (OsMemIntegrityCheckSub(tmpNode, pool) == LOS_NOK) {
                return LOS_NOK;
            }
            *preNode = *tmpNode;
        }
#if OS_MEM_EXPAND_ENABLE
        /* Hop to the next expansion region via the sentinel node. */
        if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
            *preNode = OsMemSentinelNodeGet(*tmpNode);
            endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
        } else
#endif
        {
            break;
        }
    } while (1);
    return LOS_OK;
}
1803
1804 #if (LOSCFG_KERNEL_PRINTF != 0)
/*
 * Dump the headers of the broken node and its predecessor for post-mortem
 * analysis. The printf format strings and argument lists are spliced by
 * LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK #ifs (the magic field only exists in
 * that configuration) — keep format pieces and arguments in lockstep when
 * editing. Print-only; never modifies state.
 */
STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
                          const struct OsMemNodeHead *preNode)
{
    struct OsMemUsedNodeHead *usedNode = NULL;
    struct OsMemFreeNodeHead *freeNode = NULL;

    if (tmpNode == preNode) {
        PRINTK("\n the broken node is the first node\n");
    }

    if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               "0x%x  "
#endif
               "0x%x, ",
               usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               usedNode->header.magic,
#endif
               usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)tmpNode;
        PRINTK("\n broken node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               "0x%x  "
#endif
               "0x%x, ",
               freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               freeNode->header.magic,
#endif
               freeNode->header.sizeAndFlag);
    }

    if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
        usedNode = (struct OsMemUsedNodeHead *)preNode;
        PRINTK("prev node head: %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               "0x%x  "
#endif
               "0x%x\n",
               usedNode->header.ptr.prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               usedNode->header.magic,
#endif
               usedNode->header.sizeAndFlag);
    } else {
        freeNode = (struct OsMemFreeNodeHead *)preNode;
        PRINTK("prev node head: %p  %p  %p  "
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               "0x%x  "
#endif
               "0x%x, ",
               freeNode->header.ptr.prev, freeNode->next, freeNode->prev,
#if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
               freeNode->header.magic,
#endif
               freeNode->header.sizeAndFlag);
    }

#if (LOSCFG_MEM_LEAKCHECK == 1)
    OsMemNodeBacktraceInfo(tmpNode, preNode);
#endif
}
1871 #endif
1872
/*
 * Snapshot of the two node headers involved in the most recent integrity
 * check failure, kept in a global so the data survives into a crash dump.
 */
struct OsMemIntegrityCheckInfo {
    struct OsMemNodeHead preNode; /* copy of the last node that passed the check */
    struct OsMemNodeHead errNode; /* copy of the broken node itself */
};

struct OsMemIntegrityCheckInfo g_integrityCheckRecord = {0};
1879
1880 STATIC INLINE VOID OsMemCheckInfoRecord(const struct OsMemNodeHead *errNode,
1881 const struct OsMemNodeHead *preNode)
1882 {
1883 (VOID)memcpy(&g_integrityCheckRecord.preNode, preNode, sizeof(struct OsMemNodeHead));
1884 (VOID)memcpy(&g_integrityCheckRecord.errNode, errNode, sizeof(struct OsMemNodeHead));
1885 }
1886
1887 STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
1888 const struct OsMemNodeHead *tmpNode,
1889 const struct OsMemNodeHead *preNode,
1890 UINT32 intSave)
1891 {
1892 #if (LOSCFG_KERNEL_PRINTF != 0)
1893 OsMemNodeInfo(tmpNode, preNode);
1894 #endif
1895 OsMemCheckInfoRecord(tmpNode, preNode);
1896 #if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
1897 LosTaskCB *taskCB = NULL;
1898 if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
1899 struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
1900 UINT32 taskID = usedNode->header.taskID;
1901 if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
1902 MEM_UNLOCK(pool, intSave);
1903 LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
1904 return;
1905 }
1906
1907 taskCB = OS_TCB_FROM_TID(taskID);
1908 if ((taskCB->taskStatus & OS_TASK_STATUS_UNUSED) || (taskCB->taskEntry == NULL)) {
1909 MEM_UNLOCK(pool, intSave);
1910 LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
1911 return;
1912 }
1913 } else {
1914 PRINTK("The prev node is free\n");
1915 }
1916 MEM_UNLOCK(pool, intSave);
1917 PRINT_ERR("cur node: 0x%x, pre node: 0x%x, pre node was allocated by task: %d, %s\n",
1918 (unsigned int)tmpNode, (unsigned int)preNode, taskCB->taskID, taskCB->taskName);
1919 LOS_Panic("Memory integrity check error!\n");
1920 #else
1921 MEM_UNLOCK(pool, intSave);
1922 LOS_Panic("Memory integrity check error, cur node: 0x%x, pre node: 0x%x\n", tmpNode, preNode);
1923 #endif
1924 }
1925
1926 #if (LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK == 1)
1927 STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
1928 {
1929 struct OsMemNodeHead *tmpNode = NULL;
1930 struct OsMemNodeHead *preNode = NULL;
1931
1932 if (OsMemIntegrityCheck(pool, &tmpNode, &preNode)) {
1933 OsMemIntegrityCheckError(pool, tmpNode, preNode, intSave);
1934 return LOS_NOK;
1935 }
1936 return LOS_OK;
1937 }
1938 #endif
1939
1940 UINT32 LOS_MemIntegrityCheck(const VOID *pool)
1941 {
1942 if (pool == NULL) {
1943 return LOS_NOK;
1944 }
1945
1946 struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
1947 struct OsMemNodeHead *tmpNode = NULL;
1948 struct OsMemNodeHead *preNode = NULL;
1949 UINT32 intSave = 0;
1950
1951 MEM_LOCK(poolHead, intSave);
1952 if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
1953 goto ERROR_OUT;
1954 }
1955 MEM_UNLOCK(poolHead, intSave);
1956 return LOS_OK;
1957
1958 ERROR_OUT:
1959 OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
1960 return LOS_NOK;
1961 }
1962
/*
 * Accumulate one node's contribution into @poolStatus: free nodes add to
 * the free totals (tracking the largest free node); used nodes — counting
 * gap nodes as header-only — add to the used totals.
 */
STATIC INLINE VOID OsMemInfoGet(struct OsMemNodeHead *node,
                                LOS_MEM_POOL_STATUS *poolStatus)
{
    UINT32 totalUsedSize = 0;
    UINT32 totalFreeSize = 0;
    UINT32 usedNodeNum = 0;
    UINT32 freeNodeNum = 0;
    UINT32 maxFreeSize = 0;
    UINT32 size;

    if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
        size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
        ++freeNodeNum;
        totalFreeSize += size;
        if (maxFreeSize < size) {
            maxFreeSize = size;
        }
    } else {
        /* Gap nodes occupy only their header. */
        if (OS_MEM_IS_GAP_NODE(node)) {
            size = OS_MEM_NODE_HEAD_SIZE;
        } else {
            size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
        }
        ++usedNodeNum;
        totalUsedSize += size;
    }

    poolStatus->totalUsedSize += totalUsedSize;
    poolStatus->totalFreeSize += totalFreeSize;
    poolStatus->maxFreeNodeSize = poolStatus->maxFreeNodeSize > maxFreeSize ?
                                  poolStatus->maxFreeNodeSize : maxFreeSize;
    poolStatus->usedNodeNum += usedNodeNum;
    poolStatus->freeNodeNum += freeNodeNum;
}
1997
1998 STATIC VOID OsMemNodeInfoGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
1999 {
2000 LOS_MEM_POOL_STATUS *poolStatus = (LOS_MEM_POOL_STATUS *)arg;
2001 OsMemInfoGet(curNode, poolStatus);
2002 return;
2003 }
2004
2005 UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
2006 {
2007 struct OsMemPoolHead *poolInfo = pool;
2008 UINT32 intSave = 0;
2009
2010 if (poolStatus == NULL) {
2011 PRINT_ERR("can't use NULL addr to save info\n");
2012 return LOS_NOK;
2013 }
2014
2015 if ((pool == NULL) || (poolInfo->info.pool != pool)) {
2016 PRINT_ERR("wrong mem pool addr: 0x%x, line:%d\n", (UINTPTR)poolInfo, __LINE__);
2017 return LOS_NOK;
2018 }
2019
2020 (VOID)memset(poolStatus, 0, sizeof(LOS_MEM_POOL_STATUS));
2021
2022 OsAllMemNodeDoHandle(pool, OsMemNodeInfoGetHandle, (VOID *)poolStatus);
2023
2024 MEM_LOCK(poolInfo, intSave);
2025 #if (LOSCFG_MEM_WATERLINE == 1)
2026 poolStatus->usageWaterLine = poolInfo->info.waterLine;
2027 #endif
2028 MEM_UNLOCK(poolInfo, intSave);
2029
2030 return LOS_OK;
2031 }
2032
/*
 * Print a one-line usage summary of the given pool (address, total size,
 * used/free bytes, node counts and, when LOSCFG_MEM_WATERLINE is enabled,
 * the peak usage waterline). Compiled out when the kernel printf is disabled.
 */
STATIC VOID OsMemInfoPrint(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
    LOS_MEM_POOL_STATUS status = {0};

    /* LOS_MemInfoGet validates the pool pointer; bail out silently on failure. */
    if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
        return;
    }

#if (LOSCFG_MEM_WATERLINE == 1)
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num      UsageWaterLine\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------      ------------\n");
    PRINTK("%-16p   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x     0x%-13x    0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum, status.usageWaterLine);
#else
    PRINTK("pool addr          pool size    used size     free size    "
           "max free node size   used node num     free node num\n");
    PRINTK("---------------    --------     -------       --------     "
           "--------------       -------------      ------------\n");
    PRINTK("%-16p   0x%-8x   0x%-8x    0x%-8x   0x%-16x   0x%-13x     0x%-13x\n",
           poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
           status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
           status.freeNodeNum);
#endif
#endif
}
2064
/*
 * Public API: print, per free-list bucket, how many free nodes the pool
 * currently holds, together with the node-size range each bucket covers.
 * Counting happens under the pool lock; printing happens after unlock so
 * the output may lag concurrent alloc/free activity slightly.
 * Returns LOS_NOK for an invalid pool, LOS_OK otherwise.
 */
UINT32 LOS_MemFreeNodeShow(VOID *pool)
{
#if (LOSCFG_KERNEL_PRINTF != 0)
    struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;

    /* A valid pool head stores its own address in info.pool. */
    if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
        PRINT_ERR("wrong mem pool addr: 0x%x, line: %d\n", (UINTPTR)poolInfo, __LINE__);
        return LOS_NOK;
    }

    struct OsMemFreeNodeHead *node = NULL;
    UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
    UINT32 index;
    UINT32 intSave = 0;

    /* Pass 1 (locked): count the nodes on each free list. */
    MEM_LOCK(poolInfo, intSave);
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        node = poolInfo->freeList[index];
        while (node) {
            node = node->next;
            countNum[index]++;
        }
    }
    MEM_UNLOCK(poolInfo, intSave);

    /* Pass 2 (unlocked): print the non-empty buckets with their size ranges. */
    PRINTK("\n   ************************ left free node number**********************\n");
    for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
        if (countNum[index] == 0) {
            continue;
        }

        PRINTK("free index: %03u, ", index);
        if (index < OS_MEM_SMALL_BUCKET_COUNT) {
            /* Small buckets hold one exact size each, in 4-byte steps (<< 2 is *4). */
            PRINTK("size: [0x%x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: setup is 4. */
        } else {
            /* Large buckets are two-level (TLSF-style): 'val' is the power-of-two
             * base of the first-level range and 'offset' the width of each of the
             * (1 << OS_MEM_SLI) second-level sub-ranges inside it. */
            UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
            UINT32 offset = val >> OS_MEM_SLI;
            PRINTK("size: [0x%x, 0x%x], num: %u\n",
                   (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
                   ((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
                   countNum[index]);
        }
    }
    PRINTK("\n   ********************************************************************\n\n");
#endif
    return LOS_OK;
}
2112
2113 VOID LOS_MemUnlockEnable(VOID *pool)
2114 {
2115 if (pool == NULL) {
2116 return;
2117 }
2118
2119 ((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_UNLOCK_ENABLE;
2120 }
2121
2122 #if (LOSCFG_MEM_MUL_REGIONS == 1)
2123 STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion * const memRegions,
2124 UINT32 memRegionCount)
2125 {
2126 const LosMemRegion *memRegion = NULL;
2127 VOID *lastStartAddress = NULL;
2128 VOID *curStartAddress = NULL;
2129 UINT32 lastLength;
2130 UINT32 curLength;
2131 UINT32 regionCount;
2132
2133 if ((pool != NULL) && (((struct OsMemPoolHead *)pool)->info.pool != pool)) {
2134 PRINT_ERR("wrong mem pool addr: %p, func: %s, line: %d\n", pool, __FUNCTION__, __LINE__);
2135 return LOS_NOK;
2136 }
2137
2138 if (pool != NULL) {
2139 lastStartAddress = pool;
2140 lastLength = ((struct OsMemPoolHead *)pool)->info.totalSize;
2141 }
2142
2143 memRegion = memRegions;
2144 regionCount = 0;
2145 while (regionCount < memRegionCount) {
2146 curStartAddress = memRegion->startAddress;
2147 curLength = memRegion->length;
2148 if ((curStartAddress == NULL) || (curLength == 0)) {
2149 PRINT_ERR("Memory address or length configured wrongly:address:0x%x, the length:0x%x\n",
2150 (UINTPTR)curStartAddress, curLength);
2151 return LOS_NOK;
2152 }
2153 if (((UINTPTR)curStartAddress & (OS_MEM_ALIGN_SIZE - 1)) || (curLength & (OS_MEM_ALIGN_SIZE - 1))) {
2154 PRINT_ERR("Memory address or length configured not aligned:address:0x%x, the length:0x%x, alignsize:%d\n",
2155 (UINTPTR)curStartAddress, curLength, OS_MEM_ALIGN_SIZE);
2156 return LOS_NOK;
2157 }
2158 if ((lastStartAddress != NULL) && (((UINT8 *)lastStartAddress + lastLength) >= (UINT8 *)curStartAddress)) {
2159 PRINT_ERR("Memory regions overlapped, the last start address:0x%x, "
2160 "the length:0x%x, the current start address:0x%x\n",
2161 (UINTPTR)lastStartAddress, lastLength, (UINTPTR)curStartAddress);
2162 return LOS_NOK;
2163 }
2164 memRegion++;
2165 regionCount++;
2166 lastStartAddress = curStartAddress;
2167 lastLength = curLength;
2168 }
2169 return LOS_OK;
2170 }
2171
/*
 * Append one memory region to an existing pool:
 *   1. turn the pool's previous end node into a "gap node" that spans the
 *      hole between the pool's current end and the new region's start;
 *   2. grow the pool's total size by the gap plus the new region;
 *   3. carve the new region into one free node followed by a fresh end node.
 * NOTE(review): the lastStartAddress and lastLength parameters are not used
 * in this body — the gap is computed from poolHead->info.totalSize instead;
 * confirm whether they can be dropped at the call site.
 */
STATIC INLINE VOID OsMemMulRegionsLink(struct OsMemPoolHead *poolHead, VOID *lastStartAddress, UINT32 lastLength,
                                       struct OsMemNodeHead *lastEndNode, const LosMemRegion *memRegion)
{
    UINT32 curLength;
    UINT32 gapSize;
    struct OsMemNodeHead *curEndNode = NULL;
    struct OsMemNodeHead *curFreeNode = NULL;
    VOID *curStartAddress = NULL;

    curStartAddress = memRegion->startAddress;
    curLength = memRegion->length;
#ifdef LOSCFG_KERNEL_LMS
    UINT32 resize = 0;
    if (g_lms != NULL) {
        /*
         * resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
         * resize != 0, shadow memory init successful, set poolSize as resize.
         */
        resize = g_lms->init(curStartAddress, curLength);
        curLength = (resize == 0) ? curLength : resize;
    }
#endif
    // mark the gap between two regions as one used node
    gapSize = (UINT8 *)(curStartAddress) - ((UINT8 *)(poolHead) + poolHead->info.totalSize);
    lastEndNode->sizeAndFlag = gapSize + OS_MEM_NODE_HEAD_SIZE;
    OS_MEM_SET_MAGIC(lastEndNode);
    OS_MEM_NODE_SET_USED_FLAG(lastEndNode->sizeAndFlag);

    // mark the gap node with magic number
    OS_MEM_MARK_GAP_NODE(lastEndNode);

    poolHead->info.totalSize += (curLength + gapSize);
    poolHead->info.totalGapSize += gapSize;

    // the whole new region (minus its end-node header) becomes one free node
    curFreeNode = (struct OsMemNodeHead *)curStartAddress;
    curFreeNode->sizeAndFlag = curLength - OS_MEM_NODE_HEAD_SIZE;
    curFreeNode->ptr.prev = lastEndNode;
    OS_MEM_SET_MAGIC(curFreeNode);
    OsMemFreeNodeAdd(poolHead, (struct OsMemFreeNodeHead *)curFreeNode);

    // terminate the region with a zero-sized, used end node
    curEndNode = OS_MEM_END_NODE(curStartAddress, curLength);
    curEndNode->sizeAndFlag = 0;
    curEndNode->ptr.prev = curFreeNode;
    OS_MEM_SET_MAGIC(curEndNode);
    OS_MEM_NODE_SET_USED_FLAG(curEndNode->sizeAndFlag);

#if (LOSCFG_MEM_WATERLINE == 1)
    /* The new end node's header counts as used memory. */
    poolHead->info.curUsedSize += OS_MEM_NODE_HEAD_SIZE;
    poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif
}
2223
/*
 * Public API: build or extend a memory pool from a list of discontiguous
 * memory regions.
 *   - pool != NULL: all memRegionCount regions are appended to that pool.
 *   - pool == NULL: the first region is initialized as a new pool via
 *     LOS_MemInit and the remaining regions are appended to it.
 * Regions must be pre-validated (ascending, aligned, non-overlapping) —
 * OsMemMulRegionsParamCheck enforces this.
 * Returns LOS_OK on success or the failing check/init error code.
 */
UINT32 LOS_MemRegionsAdd(VOID *pool, const LosMemRegion *const memRegions, UINT32 memRegionCount)
{
    UINT32 ret;
    UINT32 lastLength;
    UINT32 curLength;
    UINT32 regionCount;
    struct OsMemPoolHead *poolHead = NULL;
    struct OsMemNodeHead *lastEndNode = NULL;
    struct OsMemNodeHead *firstFreeNode = NULL;
    const LosMemRegion *memRegion = NULL;
    VOID *lastStartAddress = NULL;
    VOID *curStartAddress = NULL;

    ret = OsMemMulRegionsParamCheck(pool, memRegions, memRegionCount);
    if (ret != LOS_OK) {
        return ret;
    }

    memRegion = memRegions;
    regionCount = 0;
    if (pool != NULL) { // add the memory regions to the specified memory pool
        poolHead = (struct OsMemPoolHead *)pool;
        lastStartAddress = pool;
        lastLength = poolHead->info.totalSize;
    } else { // initialize the memory pool with the first memory region
        lastStartAddress = memRegion->startAddress;
        lastLength = memRegion->length;
        poolHead = (struct OsMemPoolHead *)lastStartAddress;
        ret = LOS_MemInit(lastStartAddress, lastLength);
        if (ret != LOS_OK) {
            return ret;
        }
        memRegion++;      // first region consumed by LOS_MemInit
        regionCount++;
    }

    firstFreeNode = OS_MEM_FIRST_NODE(lastStartAddress);
    lastEndNode = OS_MEM_END_NODE(lastStartAddress, poolHead->info.totalSize);
    /* traverse the rest memory regions, and initialize them as free nodes and link together */
    while (regionCount < memRegionCount) {
        curStartAddress = memRegion->startAddress;
        curLength = memRegion->length;

        OsMemMulRegionsLink(poolHead, lastStartAddress, lastLength, lastEndNode, memRegion);
        lastStartAddress = curStartAddress;
        lastLength = curLength;
        /* totalSize grew inside OsMemMulRegionsLink; refresh the end node. */
        lastEndNode = OS_MEM_END_NODE(poolHead, poolHead->info.totalSize);
        memRegion++;
        regionCount++;
    }

    /* Close the circular prev-link: the first free node points back at the
     * pool's final end node. */
    firstFreeNode->ptr.prev = lastEndNode;
    return ret;
}
2278 #endif
2279
/*
 * Initialize the system default heap (m_aucSysMem0) at boot.
 * The heap lives either in the kernel image (g_memStart) or at an
 * externally configured address, depending on LOSCFG_SYS_EXTERNAL_HEAP.
 * Returns the LOS_MemInit result.
 */
UINT32 OsMemSystemInit(VOID)
{
    UINT32 ret;

#if (LOSCFG_SYS_EXTERNAL_HEAP == 0)
    m_aucSysMem0 = g_memStart;
#else
    m_aucSysMem0 = LOSCFG_SYS_HEAP_ADDR;
#endif

    ret = LOS_MemInit(m_aucSysMem0, LOSCFG_SYS_HEAP_SIZE);
    PRINT_INFO("LiteOS heap memory address:%p, size:0x%lx\n", m_aucSysMem0, LOSCFG_SYS_HEAP_SIZE);
    return ret;
}
2294
2295 #if (LOSCFG_PLATFORM_EXC == 1)
/*
 * Fill one MemInfoCB record for the given pool, for exception/dump reporting.
 * Walks every node under the pool lock, counting blocks; on the first node
 * with a bad magic word or an out-of-pool prev pointer it records the faulty
 * address, length, and (when task tracking is enabled) the owning task ID.
 */
STATIC VOID OsMemExcInfoGetSub(struct OsMemPoolHead *pool, MemInfoCB *memExcInfo)
{
    struct OsMemNodeHead *tmpNode = NULL;
    UINT32 taskID = OS_TASK_ERRORID;
    UINT32 intSave = 0;

    (VOID)memset(memExcInfo, 0, sizeof(MemInfoCB));

    MEM_LOCK(pool, intSave);
    memExcInfo->type = MEM_MANG_MEMORY;
    memExcInfo->startAddr = (UINTPTR)pool->info.pool;
    memExcInfo->size = pool->info.totalSize;
    memExcInfo->free = pool->info.totalSize - pool->info.curUsedSize;

    struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);

    for (tmpNode = firstNode; tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
        memExcInfo->blockSize++;
        if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
            /* Used node: magic must be intact and prev must point inside the pool. */
            if (!OS_MEM_MAGIC_VALID(tmpNode) ||
                !OsMemAddrValidCheck(pool, tmpNode->ptr.prev)) {
#if (LOSCFG_MEM_FREE_BY_TASKID == 1 || LOSCFG_TASK_MEM_USED == 1)
                /* Record which task owns the corrupted node, when tracked. */
                taskID = ((struct OsMemUsedNodeHead *)tmpNode)->header.taskID;
#endif
                goto ERROUT;
            }
        } else { /* is free node, check free node range */
            struct OsMemFreeNodeHead *freeNode = (struct OsMemFreeNodeHead *)tmpNode;
            if (OsMemAddrValidCheckPrint(pool, &freeNode)) {
                goto ERROUT;
            }
        }
    }
    MEM_UNLOCK(pool, intSave);
    return;

ERROUT:
    /* Report the payload address/length of the first corrupted node found. */
    memExcInfo->errorAddr = (UINTPTR)((CHAR *)tmpNode + OS_MEM_NODE_HEAD_SIZE);
    memExcInfo->errorLen = OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag) - OS_MEM_NODE_HEAD_SIZE;
    memExcInfo->errorOwner = taskID;
    MEM_UNLOCK(pool, intSave);
    return;
}
2340
2341 UINT32 OsMemExcInfoGet(UINT32 memNumMax, MemInfoCB *memExcInfo)
2342 {
2343 UINT8 *buffer = (UINT8 *)memExcInfo;
2344 UINT32 count = 0;
2345
2346 #if (LOSCFG_MEM_MUL_POOL == 1)
2347 struct OsMemPoolHead *memPool = g_poolHead;
2348 while (memPool != NULL) {
2349 OsMemExcInfoGetSub(memPool, (MemInfoCB *)buffer);
2350 count++;
2351 buffer += sizeof(MemInfoCB);
2352 if (count >= memNumMax) {
2353 break;
2354 }
2355 memPool = memPool->nextPool;
2356 }
2357 #else
2358 OsMemExcInfoGetSub(m_aucSysMem0, buffer);
2359 count++;
2360 #endif
2361
2362 return count;
2363 }
2364 #endif
2365