//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// Define _LIBCPP_BUILDING_LIBRARY to ensure _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION
// is only defined when libc aligned allocation is not available.
#define _LIBCPP_BUILDING_LIBRARY
#include "fallback_malloc.h"

#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(_WIN32)
#include <malloc.h> // for _aligned_malloc and _aligned_free
#endif

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // for std::cout in the debug/instrumentation paths
#endif

//  A small, simple heap manager based (loosely) on
//  the startup heap manager from FreeBSD, optimized for space.
//
//  Manages a fixed-size memory pool, supports malloc and free only.
//  No support for realloc.
//
//  Allocates chunks in multiples of four bytes, with a four byte header
//  for each chunk. The overhead of each chunk is kept low by keeping pointers
//  as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
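//
//  Layout sketch (assuming the typical 2-byte unsigned short, so
//  sizeof(heap_node) == 4):
//
//      [ next_node | len ][   payload ...   ][ next_node | len ][ ...
//         2 bytes    2B      len - 1 nodes
//
//  All offsets and lengths are measured in sizeof(heap_node) units, and a
//  chunk's len includes its own header node.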

namespace {

// When POSIX threads are not available, make the mutex operations a no-op
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

// RAII scoped lock for heap_mutex; copying is disabled via the private,
// unimplemented copy members.
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
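// With no argument, __attribute__((aligned)) aligns the pool to the largest
// alignment ever used for any data type on the target, so chunks handed out
// from it are suitably aligned for ordinary use.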
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;
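// Two-byte offsets suffice: the heap holds at most
// HEAP_SIZE / sizeof(heap_node) nodes, far fewer than 65536.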

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};
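// A chunk's len counts its own header node too, so `p + p->len` (see after()
// below) is the header of the next adjacent chunk.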

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

//  Return the number of heap_node-sized units needed to satisfy a request
//  for len bytes: the payload rounded up to whole nodes, plus one node for
//  the chunk header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
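//  For example, assuming 4-byte heap_nodes, a 10-byte request needs
//  ceil(10 / 4) + 1 = 4 nodes, i.e. a 16-byte chunk.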

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  //  Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { //  chunk is larger, shorten, and return the tail
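      // Carving the allocation off the tail leaves the head of the chunk in
      // place on the free list, so no list links need to be rewritten.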
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

//  Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "Freeing item at " << offset_from_node(cp) << " of size "
            << cp->len << std::endl;
#endif

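  //  Walk the free list looking for a chunk adjacent to cp. The list is not
  //  kept in address order, so at most one neighbor is merged per free: a
  //  chunk with free neighbors on both sides only coalesces with the first
  //  one found.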
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  p, cp, after(p), after(cp) " << offset_from_node(p) << ' '
              << offset_from_node(cp) << ' ' << offset_from_node(after(p))
              << ' ' << offset_from_node(after(cp)) << std::endl;
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending onto chunk at " << offset_from_node(p)
                << std::endl;
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      cp->next_node = p->next_node; // splice p out by absorbing it into cp
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
//  Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "  Making new free list entry " << offset_from_node(cp)
            << std::endl;
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::cout << (prev == 0 ? "" : "  ") << "Offset: " << offset_from_node(p)
              << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
    total_free += p->len;
  }
  std::cout << "Total Free space: " << total_free << std::endl;
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

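// A dummy type whose only purpose is its alignment: with no argument,
// __attribute__((aligned)) requests the largest alignment used for any data
// type on the target, which is what aligned allocations should honor.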
struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = _aligned_malloc(size, alignof(__aligned_type)))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = std::malloc(size))
    return dest;
#else
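  // posix_memalign may return either a null or a unique pointer for a
  // zero-byte request; bumping the size to 1 guarantees a usable pointer.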
  if (size == 0)
    size = 1;
  void* dest;
  if (::posix_memalign(&dest, __alignof(__aligned_type), size) == 0)
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = std::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
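  // NOTE: unlike std::calloc, this multiplication is not checked for
  // overflow; in practice the libc++abi callers only request small,
  // fixed-size blocks.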
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    std::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_WIN32)
    ::_aligned_free(ptr);
#else
    std::free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    std::free(ptr);
}

} // namespace __cxxabiv1