1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 // ---
31 // Author: Sanjay Ghemawat
32
33 #include "config.h"
34 #if !USE(SYSTEM_MALLOC)
35 #include "TCSystemAlloc.h"
36
37 #include "Assertions.h"
38 #include "CheckedArithmetic.h"
39 #include "TCSpinLock.h"
40 #include "VMTags.h"
41 #include <algorithm>
42 #include <stdint.h>
43
44 #if OS(WIN)
45 #include "windows.h"
46 #else
47 #include <errno.h>
48 #include <unistd.h>
49 #include <sys/mman.h>
50 #endif
51
52 #ifndef MAP_ANONYMOUS
53 #define MAP_ANONYMOUS MAP_ANON
54 #endif
55
using namespace std;

// Structure for discovering alignment: the union's size is the strictest
// alignment needed by any fundamental type we might return, and
// sizeof(MemoryAligner) is used as the minimum allocation alignment in
// TCMalloc_SystemAlloc.
union MemoryAligner {
  void* p;
  double d;
  size_t s;
};

// Serializes TCMalloc_SystemAlloc (held for the duration of an allocation).
static SpinLock spinlock = SPINLOCK_INITIALIZER;

// Page size is initialized on demand (first query of getpagesize()).
static size_t pagesize = 0;

// Configuration parameters.

#if HAVE(MMAP)
static bool use_mmap = true;
#endif

// Flags to keep us from retrying allocators that failed.
// NOTE(review): only mmap_failure is read in this file; the devmem/sbrk
// flags presumably correspond to allocators compiled out of this build.
static bool devmem_failure = false;
static bool sbrk_failure = false;
static bool mmap_failure = false;
80
81 #if HAVE(MMAP)
82
// Allocate "size" bytes (rounded up to a multiple of "alignment", which is
// itself raised to at least one page) directly from the kernel with mmap.
// The mapping is bracketed by two PROT_NONE guard pages so stray accesses
// just past either end of the block fault immediately.  On success the
// rounded size is stored through "actual_size" (if non-null) and the aligned
// pointer is returned; on failure mmap_failure is set and NULL is returned.
// Called with "spinlock" held (see TCMalloc_SystemAlloc).
static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize; the surplus lets us slide
  // the start of the block forward to the requested alignment.
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  // Two additional pages hold the guard pages.  Checked<> arithmetic guards
  // the total mapping size against overflow (see CheckedArithmetic.h).
  Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize;
  // NOTE(review): VM_TAG_FOR_TCMALLOC_MEMORY is passed in mmap's fd slot;
  // presumably it tags anonymous memory for memory-accounting tools (see
  // VMTags.h) -- confirm it degrades to -1/ignored on non-Darwin platforms.
  void* result = mmap(NULL, mapSize.unsafeGet(),
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }
  // Turn the first and last page of the mapping into inaccessible guard
  // pages, then step past the leading one.
  mmap(result, pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  mmap(static_cast<char*>(result) + (mapSize - pagesize).unsafeGet(), pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  result = static_cast<char*>(result) + pagesize;
  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system: the gap before the aligned
  // start (if any), and whatever remains of "extra" past the block's end.
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}
129
130 #endif /* HAVE(MMAP) */
131
TCMalloc_SystemAlloc(size_t size,size_t * actual_size,size_t alignment)132 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
133 // Discard requests that overflow
134 if (size + alignment < size) return NULL;
135
136 SpinLockHolder lock_holder(&spinlock);
137
138 // Enforce minimum alignment
139 if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);
140
141 // Try twice, once avoiding allocators that failed before, and once
142 // more trying all allocators even if they failed before.
143 for (int i = 0; i < 2; i++) {
144
145 #if HAVE(MMAP)
146 if (use_mmap && !mmap_failure) {
147 void* result = TryMmap(size, actual_size, alignment);
148 if (result != NULL) return result;
149 }
150 #endif
151
152 // nothing worked - reset failure flags and try again
153 devmem_failure = false;
154 sbrk_failure = false;
155 mmap_failure = false;
156 }
157 return NULL;
158 }
159
160 #if HAVE(MADV_FREE_REUSE)
161
TCMalloc_SystemRelease(void * start,size_t length)162 void TCMalloc_SystemRelease(void* start, size_t length)
163 {
164 int madviseResult;
165
166 while ((madviseResult = madvise(start, length, MADV_FREE_REUSABLE)) == -1 && errno == EAGAIN) { }
167
168 // Although really advisory, if madvise fail, we want to know about it.
169 ASSERT_UNUSED(madviseResult, madviseResult != -1);
170 }
171
172 #elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)
173
TCMalloc_SystemRelease(void * start,size_t length)174 void TCMalloc_SystemRelease(void* start, size_t length)
175 {
176 // MADV_FREE clears the modified bit on pages, which allows
177 // them to be discarded immediately.
178 #if HAVE(MADV_FREE)
179 const int advice = MADV_FREE;
180 #else
181 const int advice = MADV_DONTNEED;
182 #endif
183 if (pagesize == 0) pagesize = getpagesize();
184 const size_t pagemask = pagesize - 1;
185
186 size_t new_start = reinterpret_cast<size_t>(start);
187 size_t end = new_start + length;
188 size_t new_end = end;
189
190 // Round up the starting address and round down the ending address
191 // to be page aligned:
192 new_start = (new_start + pagesize - 1) & ~pagemask;
193 new_end = new_end & ~pagemask;
194
195 ASSERT((new_start & pagemask) == 0);
196 ASSERT((new_end & pagemask) == 0);
197 ASSERT(new_start >= reinterpret_cast<size_t>(start));
198 ASSERT(new_end <= end);
199
200 if (new_end > new_start) {
201 // Note -- ignoring most return codes, because if this fails it
202 // doesn't matter...
203 while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
204 advice) == -1 &&
205 errno == EAGAIN) {
206 // NOP
207 }
208 }
209 }
210
211 #elif HAVE(MMAP)
212
TCMalloc_SystemRelease(void * start,size_t length)213 void TCMalloc_SystemRelease(void* start, size_t length)
214 {
215 void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
216 // If the mmap failed then that's ok, we just won't return the memory to the system.
217 ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
218 }
219
220 #else
221
222 // Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease
223 // declared in TCSystemAlloc.h
224
225 #endif
226
227 #if HAVE(MADV_FREE_REUSE)
228
TCMalloc_SystemCommit(void * start,size_t length)229 void TCMalloc_SystemCommit(void* start, size_t length)
230 {
231 while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
232 }
233
234 #else
235
236 // Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit
237 // declared in TCSystemAlloc.h
238
239 #endif
240
241 #endif // #if !USE(SYSTEM_MALLOC)
242
243