/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
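
/* A minimal sketch of the matching lwipopts.h settings for the pool-based
 * mem_malloc() described above. The option names are the standard lwIP
 * options mentioned in the comment; the combination is illustrative:
 *
 *   #define MEM_USE_POOLS         1
 *   #define MEMP_USE_CUSTOM_POOLS 1
 *   // lwippools.h then provides the LWIP_MALLOC_MEMPOOL() list shown above
 */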

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_LIBC_MALLOC
#include <stdlib.h> /* for malloc()/free() */
#endif
#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
#include <stdio.h> /* for snprintf() used by mem_overflow_check_raw() */
#endif

/* This is overridable for tests only... */
#ifndef LWIP_MEM_ILLEGAL_FREE
#define LWIP_MEM_ILLEGAL_FREE(msg)         LWIP_ASSERT(msg, 0)
#endif

#define MEM_STATS_INC_LOCKED(x)         SYS_ARCH_LOCKED(MEM_STATS_INC(x))
#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))

#if MEM_OVERFLOW_CHECK
#define MEM_SANITY_OFFSET   MEM_SANITY_REGION_BEFORE_ALIGNED
#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
#else
#define MEM_SANITY_OFFSET   0
#define MEM_SANITY_OVERHEAD 0
#endif
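
/* With MEM_OVERFLOW_CHECK enabled, each allocation is padded by two guard
 * regions that mem_overflow_init_raw() fills with 0xcd and that
 * mem_overflow_check_raw() later verifies. A sketch of the resulting layout
 * (the exact header depends on whether the heap or a pool provided the
 * memory):
 *
 *   | header | before-guard | user data (size bytes) | after-guard |
 *            |<- MEM_SANITY_OFFSET ->|
 *   the pointer handed to the user sits just past the before-guard
 */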

#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
/**
 * Check if a mem element was the victim of an overflow or underflow
 * (e.g. the restricted area after/before it has been altered)
 *
 * @param p the mem element to check
 * @param size allocated size of the element
 * @param descr1 description of the element source shown on error
 * @param descr2 description of the element source shown on error
 */
void
mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
{
#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
  u16_t k;
  u8_t *m;

#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */

#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
#else
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
  LWIP_UNUSED_ARG(descr1);
  LWIP_UNUSED_ARG(descr2);
#endif
}

/**
 * Initialize the restricted area of a mem element.
 */
void
mem_overflow_init_raw(void *p, size_t size)
{
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
  u8_t *m;
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
#endif
#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
}
#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */

#if MEM_LIBC_MALLOC || MEM_USE_POOLS

/** mem_init is not used when using pools instead of a heap or using
 * C library malloc().
 */
void
mem_init(void)
{
}

/** mem_trim is not used when using pools instead of a heap or using
 * C library malloc(): we can't free part of a pool element, and the stack
 * does not support mem_trim() returning a different pointer.
 */
void *
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */

#if MEM_LIBC_MALLOC
/* lwIP heap implemented using C library malloc() */

/* in case C library malloc() needs extra protection,
 * allow these defines to be overridden.
 */
#ifndef mem_clib_free
#define mem_clib_free free
#endif
#ifndef mem_clib_malloc
#define mem_clib_malloc malloc
#endif
#ifndef mem_clib_calloc
#define mem_clib_calloc calloc
#endif
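
/* A minimal sketch of overriding these hooks, e.g. to route lwIP through an
 * RTOS allocator. The os_malloc/os_free/os_calloc names are hypothetical
 * stand-ins for whatever your port provides; put the defines in lwipopts.h
 * (or an arch header) so they are seen before this file:
 *
 *   #define mem_clib_malloc os_malloc
 *   #define mem_clib_free   os_free
 *   #define mem_clib_calloc os_calloc
 */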

#if LWIP_STATS && MEM_STATS
#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
#else
#define MEM_LIBC_STATSHELPER_SIZE 0
#endif

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
  if (ret == NULL) {
    MEM_STATS_INC_LOCKED(err);
  } else {
    LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
#if LWIP_STATS && MEM_STATS
    *(mem_size_t *)ret = size;
    ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
    MEM_STATS_INC_USED_LOCKED(used, size);
#endif
  }
  return ret;
}

/** Put memory back on the heap
 *
 * @param rmem is the pointer as returned by a previous call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
#if LWIP_STATS && MEM_STATS
  rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
  MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
#endif
  mem_clib_free(rmem);
}

#elif MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper *)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /* Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC_LOCKED(err);
        return NULL;
      }
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC_LOCKED(err);
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED_LOCKED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size) */
  memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}
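
/* Layout of a pool-backed allocation as built above (a sketch; the helper
 * struct records which pool the element must be returned to, and the 0xcd
 * fill only exists with MEMP_OVERFLOW_CHECK enabled):
 *
 *   | struct memp_malloc_helper | user data (size bytes) | 0xcd fill... |
 *                               ^ pointer returned by mem_malloc()
 */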

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool.
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
                hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t *)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
#if MEM_OVERFLOW_CHECK
  /** this keeps track of the user allocation size for guard checks */
  mem_size_t user_size;
#endif
};

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values help prevent many tiny blocks from fragmenting the RAM. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
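
/* A worked example of these macros, assuming MEM_ALIGNMENT == 4 and a port
 * where mem_size_t is a u16_t (so sizeof(struct mem) == 5 without the
 * overflow-check member); the exact numbers depend on the port:
 *
 *   MIN_SIZE_ALIGNED  = LWIP_MEM_ALIGN_SIZE(12) = 12
 *   SIZEOF_STRUCT_MEM = LWIP_MEM_ALIGN_SIZE(5)  = 8
 *
 * so the smallest possible chunk occupies 8 + 12 = 20 bytes of heap,
 * header included.
 */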

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */
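
/* A minimal sketch of relocating the heap, e.g. into external SDRAM. The
 * address is purely illustrative and must match your hardware; reserve at
 * least MEM_SIZE_ALIGNED + 2 * SIZEOF_STRUCT_MEM bytes plus alignment slack
 * there, and put the define in lwipopts.h:
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xC0000000)
 */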

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)
#define LWIP_MEM_LFREE_VOLATILE       volatile

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()
#define LWIP_MEM_LFREE_VOLATILE

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/** pointer to the lowest free block, this is used for faster search */
static struct mem * LWIP_MEM_LFREE_VOLATILE lfree;

#if MEM_SANITY_CHECK
static void mem_sanity(void);
#define MEM_SANITY() mem_sanity()
#else
#define MEM_SANITY()
#endif

#if MEM_OVERFLOW_CHECK
static void
mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem->user_size = user_size;
  mem_overflow_init_raw(p, user_size);
}

static void
mem_overflow_check_element(struct mem *mem)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem_overflow_check_raw(p, mem->user_size, "heap", "");
}
#else /* MEM_OVERFLOW_CHECK */
#define mem_overflow_init_element(mem, size)
#define mem_overflow_check_element(mem)
#endif /* MEM_OVERFLOW_CHECK */

static struct mem *
ptr_to_mem(mem_size_t ptr)
{
  return (struct mem *)(void *)&ram[ptr];
}

static mem_size_t
mem_to_ptr(void *mem)
{
  return (mem_size_t)((u8_t *)mem - ram);
}

/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = ptr_to_mem(mem->next);
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    if (nmem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
    }
  }

  /* plug hole backward */
  pmem = ptr_to_mem(mem->prev);
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    if (mem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
    }
  }
}
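
/* A small worked example of plug_holes(): suppose three consecutive chunks
 * A -> B -> C, where B has just been freed. If C is also unused (and is not
 * ram_end), B absorbs C by taking over C's next index; if A is unused as
 * well, A then absorbs B the same way, leaving one free chunk that links
 * straight to the old C->next. lfree is rewired along the way so it keeps
 * pointing at the lowest free chunk.
 */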

/**
 * Initialize the heap: set up the start and end elements and the
 * lowest-free pointer
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
              (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;
  MEM_SANITY();

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/* Check if a struct mem is correctly linked.
 * If not, double-free is a possible reason.
 */
static int
mem_link_valid(struct mem *mem)
{
  struct mem *nmem, *pmem;
  mem_size_t rmem_idx;
  rmem_idx = mem_to_ptr(mem);
  nmem = ptr_to_mem(mem->next);
  pmem = ptr_to_mem(mem->prev);
  if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
      ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
      ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
    return 0;
  }
  return 1;
}

#if MEM_SANITY_CHECK
static void
mem_sanity(void)
{
  struct mem *mem;
  u8_t last_used;

  /* begin with first element here */
  mem = (struct mem *)ram;
  LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1));
  last_used = mem->used;
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0);
  LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

  /* check all elements before the end of the heap */
  for (mem = ptr_to_mem(mem->next);
       ((u8_t *)mem > ram) && (mem < ram_end);
       mem = ptr_to_mem(mem->next)) {
    LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem);
    LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev)) == ptr_to_mem(mem->prev));
    LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

    if (last_used == 0) {
      /* 2 unused elements in a row? */
      LWIP_ASSERT("heap element unused?", mem->used == 1);
    } else {
      LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1));
    }

    LWIP_ASSERT("heap element link valid", mem_link_valid(mem));

    /* used/unused alternating */
    last_used = mem->used;
  }
  LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED));
  LWIP_ASSERT("heap element used valid", mem->used == 1);
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED);
}
#endif /* MEM_SANITY_CHECK */

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* Get the corresponding struct mem: */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));

  if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* mem has to be in a used state */
  if (!mem->used) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  if (!mem_link_valid(mem)) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* mem is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param new_size required size after shrinking (needs to be smaller than or
 *                 equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if new_size is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t new_size)
{
  mem_size_t size, newsize;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
              (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* ... and its offset pointer */
  ptr = mem_to_ptr(mem);

  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = ptr_to_mem(mem->next);
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    if (lfree == mem2) {
      lfree = ptr_to_mem(ptr2);
    }
    mem2 = ptr_to_mem(ptr2);
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    mem2 = ptr_to_mem(ptr2);
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if MEM_OVERFLOW_CHECK
  mem_overflow_init_element(mem, new_size);
#endif
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
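
/* A minimal usage sketch for mem_trim(): over-allocate, then give back the
 * unused tail once the real length is known. The buffer, sizes and
 * fill_buffer() helper are hypothetical:
 *
 *   u8_t *buf = (u8_t *)mem_malloc(1024);
 *   if (buf != NULL) {
 *     mem_size_t used_len = fill_buffer(buf);  // hypothetical helper
 *     buf = (u8_t *)mem_trim(buf, used_len);   // returns buf unchanged today
 *   }
 */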

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size_in is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size_in)
{
  mem_size_t ptr, ptr2, size;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size_in == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ptr_to_mem(ptr)->next) {
      mem = ptr_to_mem(ptr);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
          LWIP_ASSERT("invalid next ptr", ptr2 != MEM_SIZE_ALIGNED);
          /* create mem2 struct */
          mem2 = ptr_to_mem(ptr2);
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ptr_to_mem(mem2->next)->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = ptr_to_mem(cur->next);
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
                    (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);

#if MEM_OVERFLOW_CHECK
        mem_overflow_init_element(mem, size_in);
#endif
        MEM_SANITY();
        return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  return NULL;
}

#endif /* MEM_USE_POOLS */

#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return mem_clib_calloc(count, size);
}

#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;
  size_t alloc_size = (size_t)count * (size_t)size;

  if ((size_t)(mem_size_t)alloc_size != alloc_size) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
    return NULL;
  }

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc((mem_size_t)alloc_size);
  if (p) {
    /* zero the memory */
    memset(p, 0, alloc_size);
  }
  return p;
}
#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */