// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor) can be
//     read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries. This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons. We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B. The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0. The cache may have stale information for pages that do
// not hold the beginning of any free()'able object. Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
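//
// For illustration only (hypothetical page IDs): if page p holds live
// sizeclass-5 objects and page q has never been cached, a lookup of p in
// pagemap_cache_ yields 5, while a lookup of q yields 0, which callers must
// read as "consult the pagemap", never as "sizeclass 0".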
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.

77 #include "config.h"
78 #include "FastMalloc.h"
79
80 #include "Assertions.h"
81 #include <limits>
82 #if ENABLE(JSC_MULTIPLE_THREADS)
83 #include <pthread.h>
84 #endif
85
86 #ifndef NO_TCMALLOC_SAMPLES
87 #ifdef WTF_CHANGES
88 #define NO_TCMALLOC_SAMPLES
89 #endif
90 #endif
91
92 #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
93 #define FORCE_SYSTEM_MALLOC 0
94 #else
95 #define FORCE_SYSTEM_MALLOC 1
96 #endif
97
98 // Use a background thread to periodically scavenge memory to release back to the system
99 // https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
100 #if defined(BUILDING_ON_TIGER)
101 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
102 #else
103 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
104 #endif
105
106 #ifndef NDEBUG
107 namespace WTF {
108
109 #if ENABLE(JSC_MULTIPLE_THREADS)
110 static pthread_key_t isForbiddenKey;
111 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
initializeIsForbiddenKey()112 static void initializeIsForbiddenKey()
113 {
114 pthread_key_create(&isForbiddenKey, 0);
115 }
116
117 #if !ASSERT_DISABLED
isForbidden()118 static bool isForbidden()
119 {
120 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
121 return !!pthread_getspecific(isForbiddenKey);
122 }
123 #endif
124
fastMallocForbid()125 void fastMallocForbid()
126 {
127 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
128 pthread_setspecific(isForbiddenKey, &isForbiddenKey);
129 }
130
fastMallocAllow()131 void fastMallocAllow()
132 {
133 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
134 pthread_setspecific(isForbiddenKey, 0);
135 }
136
137 #else
138
139 static bool staticIsForbidden;
140 static bool isForbidden()
141 {
142 return staticIsForbidden;
143 }
144
145 void fastMallocForbid()
146 {
147 staticIsForbidden = true;
148 }
149
150 void fastMallocAllow()
151 {
152 staticIsForbidden = false;
153 }
154 #endif // ENABLE(JSC_MULTIPLE_THREADS)
155
156 } // namespace WTF
157 #endif // NDEBUG
158
159 #include <string.h>
160
161 namespace WTF {
162
163 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
164
165 namespace Internal {
166
fastMallocMatchFailed(void *)167 void fastMallocMatchFailed(void*)
168 {
169 CRASH();
170 }
171
172 } // namespace Internal
173
174 #endif
175
fastZeroedMalloc(size_t n)176 void* fastZeroedMalloc(size_t n)
177 {
178 void* result = fastMalloc(n);
179 memset(result, 0, n);
180 return result;
181 }
182
fastStrDup(const char * src)183 char* fastStrDup(const char* src)
184 {
    size_t len = strlen(src) + 1;
    char* dup = static_cast<char*>(fastMalloc(len));

    if (dup)
        memcpy(dup, src, len);

    return dup;
}

TryMallocReturnValue tryFastZeroedMalloc(size_t n)
{
    void* result;
    if (!tryFastMalloc(n).getValue(result))
        return 0;
    memset(result, 0, n);
    return result;
}
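
// A usage sketch (illustrative; bufferSize is a hypothetical caller variable):
// the try* variants report failure via TryMallocReturnValue instead of
// crashing, so callers unpack the result with getValue() and check it:
//
//     void* buffer;
//     if (!tryFastZeroedMalloc(bufferSize).getValue(buffer))
//         return false;  // allocation failed, no CRASH()
//     // ... use buffer ...
//     fastFree(buffer);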

} // namespace WTF

#if FORCE_SYSTEM_MALLOC

namespace WTF {

TryMallocReturnValue tryFastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
        return 0;

    void* result = malloc(n + sizeof(AllocAlignmentInteger));
    if (!result)
        return 0;

    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;

    return result;
#else
    return malloc(n);
#endif
}

void* fastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    TryMallocReturnValue returnValue = tryFastMalloc(n);
    void* result;
    returnValue.getValue(result);
#else
    void* result = malloc(n);
#endif

    if (!result) {
#if PLATFORM(BREWMP)
        // The behavior of malloc(0) is implementation defined.
        // To make sure that fastMalloc never returns 0, retry with fastMalloc(1).
        if (!n)
            return fastMalloc(1);
#endif
        CRASH();
    }

    return result;
}

TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    size_t totalBytes = n_elements * element_size;
    if ((n_elements > 1 && element_size && (totalBytes / element_size) != n_elements) || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
        return 0;

    totalBytes += sizeof(AllocAlignmentInteger);
    void* result = malloc(totalBytes);
    if (!result)
        return 0;

    memset(result, 0, totalBytes);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
    return result;
#else
    return calloc(n_elements, element_size);
#endif
}

void* fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
    void* result;
    returnValue.getValue(result);
#else
    void* result = calloc(n_elements, element_size);
#endif

    if (!result) {
#if PLATFORM(BREWMP)
        // If either n_elements or element_size is 0, the behavior of calloc is implementation defined.
        // To make sure that fastCalloc never returns 0, retry with fastCalloc(1, 1).
        if (!n_elements || !element_size)
            return fastCalloc(1, 1);
#endif
        CRASH();
    }

    return result;
}

void fastFree(void* p)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (!p)
        return;

    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(p);
    free(header);
#else
    free(p);
#endif
}

TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (p) {
        if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
            return 0;
        AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
        if (*header != Internal::AllocTypeMalloc)
            Internal::fastMallocMatchFailed(p);
        void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
        if (!result)
            return 0;

        // This should not be needed because the value is already there:
        // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
        result = static_cast<AllocAlignmentInteger*>(result) + 1;
        return result;
    } else {
        return fastMalloc(n);
    }
#else
    return realloc(p, n);
#endif
}

void* fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    TryMallocReturnValue returnValue = tryFastRealloc(p, n);
    void* result;
    returnValue.getValue(result);
#else
    void* result = realloc(p, n);
#endif

    if (!result)
        CRASH();
    return result;
}

void releaseFastMallocFreeMemory() { }

FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics = { 0, 0, 0, 0 };
    return statistics;
}

} // namespace WTF

#if OS(DARWIN)
// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
377 extern "C" const int jscore_fastmalloc_introspection = 0;
378 #endif
379
380 #else // FORCE_SYSTEM_MALLOC
381
382 #if HAVE(STDINT_H)
383 #include <stdint.h>
384 #elif HAVE(INTTYPES_H)
385 #include <inttypes.h>
386 #else
387 #include <sys/types.h>
388 #endif
389
390 #include "AlwaysInline.h"
391 #include "Assertions.h"
392 #include "TCPackedCache.h"
393 #include "TCPageMap.h"
394 #include "TCSpinLock.h"
395 #include "TCSystemAlloc.h"
396 #include <algorithm>
397 #include <errno.h>
398 #include <limits>
399 #include <new>
400 #include <pthread.h>
401 #include <stdarg.h>
402 #include <stddef.h>
403 #include <stdio.h>
404 #if OS(UNIX)
405 #include <unistd.h>
406 #endif
407 #if COMPILER(MSVC)
408 #ifndef WIN32_LEAN_AND_MEAN
409 #define WIN32_LEAN_AND_MEAN
410 #endif
411 #include <windows.h>
412 #endif
413
414 #if WTF_CHANGES
415
416 #if OS(DARWIN)
417 #include "MallocZoneSupport.h"
418 #include <wtf/HashSet.h>
419 #include <wtf/Vector.h>
420 #endif
421 #if HAVE(DISPATCH_H)
422 #include <dispatch/dispatch.h>
423 #endif
424
425
426 #ifndef PRIuS
427 #define PRIuS "zu"
428 #endif
429
430 // Calling pthread_getspecific through a global function pointer is faster than a normal
431 // call to the function on Mac OS X, and it's used in performance-critical code. So we
432 // use a function pointer. But that's not necessarily faster on other platforms, and we had
433 // problems with this technique on Windows, so we'll do this only on Mac OS X.
434 #if OS(DARWIN)
435 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
436 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
437 #endif
438
439 #define DEFINE_VARIABLE(type, name, value, meaning) \
440 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
441 type FLAGS_##name(value); \
442 char FLAGS_no##name; \
443 } \
444 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
445
446 #define DEFINE_int64(name, value, meaning) \
447 DEFINE_VARIABLE(int64_t, name, value, meaning)
448
449 #define DEFINE_double(name, value, meaning) \
450 DEFINE_VARIABLE(double, name, value, meaning)
451
452 namespace WTF {
453
454 #define malloc fastMalloc
455 #define calloc fastCalloc
456 #define free fastFree
457 #define realloc fastRealloc
458
459 #define MESSAGE LOG_ERROR
460 #define CHECK_CONDITION ASSERT
461
462 #if OS(DARWIN)
463 class Span;
464 class TCMalloc_Central_FreeListPadded;
465 class TCMalloc_PageHeap;
466 class TCMalloc_ThreadCache;
467 template <typename T> class PageHeapAllocator;
468
469 class FastMallocZone {
470 public:
471 static void init();
472
    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
    PageHeapAllocator<Span>* m_spanAllocator;
    PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};

#endif

#endif

#ifndef WTF_CHANGES
// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <google/stacktrace.h>
#endif
#endif

// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it. We need to check that at
// runtime. Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
static bool kernel_supports_tls = false; // be conservative
static inline bool KernelSupportsTLS() {
  return kernel_supports_tls;
}
# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
static void CheckIfKernelSupportsTLS() {
  kernel_supports_tls = false;
}
# else
#   include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
static void CheckIfKernelSupportsTLS() {
  struct utsname buf;
  if (uname(&buf) != 0) { // should be impossible
    MESSAGE("uname failed; assuming no TLS support (errno=%d)\n", errno);
    kernel_supports_tls = false;
  } else if (strcasecmp(buf.sysname, "linux") == 0) {
    // The linux case: the first kernel to support TLS was 2.6.0
    if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
      kernel_supports_tls = false;
    else if (buf.release[0] == '2' && buf.release[1] == '.' &&
             buf.release[2] >= '0' && buf.release[2] < '6' &&
             buf.release[3] == '.') // 2.0 - 2.5
      kernel_supports_tls = false;
    else
      kernel_supports_tls = true;
  } else { // some other kernel, we'll be optimistic
    kernel_supports_tls = true;
  }
  // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
}
# endif // HAVE_DECL_UNAME
#endif // HAVE_TLS

// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW // I guess we're not on a glibc system
# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif

//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------

// Not all possible combinations of the following parameters make
// sense. In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time. Must be
// significantly bigger than kPageSize to amortize system-call
// overhead, and also to reduce external fragmentation. Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);

// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;

/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };

// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions. "
             "Must be a prime number. Otherwise will be rounded up to a "
             "larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;

// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");

//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------

// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index. The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
//   Size       Expression                      Index
//   -------------------------------------------------------
//   0          (0 + 7) / 8                     0
//   1          (1 + 7) / 8                     1
//   ...
//   1024       (1024 + 7) / 8                  128
//   1025       (1025 + 127 + (120<<7)) / 128   129
//   ...
//   32768      (32768 + 127 + (120<<7)) / 128  376
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}
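
// Worked example (follows from the constants above): for s = 1025,
// i = (s > kMaxSmallSize) = 1, so the index is
// (1025 + 127 + (120 << 7)) >> 7 = 16512 >> 7 = 129, landing just past
// index 128, the last slot used by the "divide by 8" half of the table.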

// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];

// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head; // Head of chain of objects.
  void *tail; // Tail of chain of objects.
};
// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put linked list chains into. To keep memory usage bounded the total
// number of TCEntries across size classes is fixed. Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;

// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
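
// Example trace (illustrative): LgFloor(36) tries shifts 16, 8, 4, 2, 1:
//   36 >> 16 == 0, 36 >> 8 == 0, 36 >> 4 == 2  => n = 2, log = 4
//   2 >> 2 == 0,   2 >> 1 == 1                 => n = 1, log = 5
// so LgFloor(36) == 5 == floor(log2(36)).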

// Some very basic linked list functions for dealing with using void * as
// storage.

static inline void *SLL_Next(void *t) {
  return *(reinterpret_cast<void**>(t));
}

static inline void SLL_SetNext(void *t, void *n) {
  *(reinterpret_cast<void**>(t)) = n;
}

static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

static inline void *SLL_Pop(void **list) {
  void *result = *list;
  *list = SLL_Next(*list);
  return result;
}

// Remove N elements from a linked list to which head points. head will be
// modified to point to the new head. start and end will point to the first
// and last nodes of the range. Note that end will point to NULL after this
// function is called.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  void *tmp = *head;
  for (int i = 1; i < N; ++i) {
    tmp = SLL_Next(tmp);
  }

  *start = *head;
  *end = tmp;
  *head = SLL_Next(tmp);
  // Unlink range from list.
  SLL_SetNext(tmp, NULL);
}
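
// Usage sketch (hypothetical list A -> B -> C): after
//   void *start, *end;
//   SLL_PopRange(&head, 2, &start, &end);
// head points to C, start to A, end to B, and SLL_Next(end) == NULL.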

static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

static inline size_t SLL_Size(void *head) {
  int count = 0;
  while (head) {
    count++;
    head = SLL_Next(head);
  }
  return count;
}

// Setup helper functions.

static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}

static int NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  // Clamp well below kMaxFreeListLength to avoid ping pong between central
  // and thread caches.
  if (num > static_cast<int>(0.8 * kMaxFreeListLength))
    num = static_cast<int>(0.8 * kMaxFreeListLength);

  // Also, avoid bringing too many objects into small object free
  // lists. There are lots of such lists, and if we allow each one to
  // fetch too many at a time, we end up having to scavenge too often
  // (especially when there are lots of threads and each thread gets a
  // small allowance for its thread cache).
  //
  // TODO: Make thread cache free list sizes dynamic so that we do not
  // have to equally divide a fixed resource amongst lots of threads.
  if (num > 32) num = 32;

  return num;
}
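
// Worked example (from the constants above): for a 1024-byte size class,
// num = 64 * 1024 / 1024 = 64; that is under 0.8 * kMaxFreeListLength (204)
// but over the small-object cap, so NumMoveSize(1024) returns 32.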

// Initialize the mapping arrays
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    CRASH();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    CRASH();
  }

  // Compute the size classes we want to use
  size_t sc = 1; // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    CRASH();
  }

  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }

  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      CRASH();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
  }

  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }

#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size
              );
    }
  }
#endif
}

// -------------------------------------------------------------------------
// Simple allocator for objects of a specified type. External locking
// is required before accessing one of these objects.
// -------------------------------------------------------------------------

// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes, 0);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}

template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
      = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Linked list of all regions allocated by this allocator
  void* allocated_regions_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    allocated_regions_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (!new_allocation)
          CRASH();

        *(void**)new_allocation = allocated_regions_;
        allocated_regions_ = new_allocation;
        free_area_ = new_allocation + kAlignedSize;
        free_avail_ = kAllocIncrement - kAlignedSize;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }

#if defined(WTF_CHANGES) && OS(DARWIN)
  template <class Recorder>
  void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
  {
      vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
      while (adminAllocation) {
          recorder.recordRegion(adminAllocation, kAllocIncrement);
          adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
      }
  }
#endif
};

// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;

// Convert byte size into pages. This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
         ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
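
// Example (with kPageShift == 12, i.e. 4K pages): pages(1) == 1,
// pages(4096) == 1, pages(4097) == 2 -- byte counts round up to whole pages.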

// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
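
// Example (with 4K pages, kMaxSize == 32768): AllocationSize(40000) is a
// large object, so it rounds up to pages(40000) << kPageShift == 10 * 4096
// == 40960. AllocationSize(100) returns the capacity of the smallest size
// class that holds 100 bytes, as computed by InitSizeClasses() above.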

// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in linked list
  Span*         prev;           // Used when in linked list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int  sample : 1;     // Sampled object?
#endif
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects
  bool decommitted : 1;

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};

#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)

#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif

// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}

// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}

static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}

#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif

static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}
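
// Usage sketch: each list is circular with a sentinel Span, so creation and
// insertion look like this (illustrative only; span is a freshly zeroed Span):
//   Span sentinel;
//   DLL_Init(&sentinel);            // sentinel.next == sentinel.prev == &sentinel
//   ASSERT(DLL_IsEmpty(&sentinel));
//   DLL_Prepend(&sentinel, span);   // span becomes sentinel.next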

// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
// The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
  uintptr_t size;   // Size of object
  uintptr_t depth;  // Number of PC values stored in array below
  void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

#if defined(WTF_CHANGES)
#if CPU(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details

static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};

// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation. We allow allocating and freeing
// contiguous runs of pages (each such run is called a "span").
// -------------------------------------------------------------------------

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// The central page heap collects spans of memory that have been deleted but are still committed until they are released
// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
// system. Every 5 seconds, the background thread wakes up and does the following:
// - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
// of free committed pages and so we should not release them back to the system yet.
// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
// back to the system.
// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.

// Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
static const int kScavengeTimerDelayInSeconds = 5;

// Number of free committed pages that we want to keep around.
static const size_t kMinimumFreeCommittedPageCount = 512;

// During a scavenge, we'll release up to a fraction of the free committed pages.
#if OS(WINDOWS)
// We are slightly less aggressive in releasing memory on Windows due to performance reasons.
static const int kMaxScavengeAmountFactor = 3;
#else
static const int kMaxScavengeAmountFactor = 2;
#endif
#endif

class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages. Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }

  size_t ReturnedBytes() const;
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
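
  // Illustrative use (hypothetical page id): after CacheSizeClass(p, 5),
  // GetSizeClassIfCached(p) returns 5. For a page that was never cached it
  // returns 0, which callers must treat as "unknown", not as sizeclass 0.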

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Number of pages kept in free lists that are still committed.
  Length free_committed_pages_;

  // Number of pages that we committed in the last scavenge wait interval.
  Length pages_committed_since_last_scavenge_;
#endif

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists. Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n. If successful, returns a
  // span of exactly the specified length. Else, returns NULL.
  Span* AllocLarge(Length n);

#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);
#endif

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && OS(DARWIN)
  friend class FastMallocZone;
#endif

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  void initializeScavenger();
  ALWAYS_INLINE void signalScavenger();
  void scavenge();
  ALWAYS_INLINE bool shouldContinueScavenging() const;

#if !HAVE(DISPATCH_H)
  static NO_RETURN void* runScavengerThread(void*);
  NO_RETURN void scavengerThread();

  // Keeps track of whether the background thread is actively scavenging memory every
  // kScavengeTimerDelayInSeconds seconds, or is blocked waiting for more pages to be deleted.
  bool m_scavengeThreadActive;

  pthread_mutex_t m_scavengeMutex;
  pthread_cond_t m_scavengeCondition;
#else // !HAVE(DISPATCH_H)
  void periodicScavenge();

  dispatch_queue_t m_scavengeQueue;
  dispatch_source_t m_scavengeTimer;
  bool m_scavengingScheduled;
#endif

#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};

void TCMalloc_PageHeap::init()
{
  pagemap_.init(MetaDataAlloc);
  pagemap_cache_ = PageMapCache(0);
  free_pages_ = 0;
  system_bytes_ = 0;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  free_committed_pages_ = 0;
  pages_committed_since_last_scavenge_ = 0;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

  scavenge_counter_ = 0;
  // Start scavenging at kMaxPages list
  scavenge_index_ = kMaxPages-1;
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (size_t i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  initializeScavenger();
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
}

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

#if !HAVE(DISPATCH_H)

void TCMalloc_PageHeap::initializeScavenger()
{
  pthread_mutex_init(&m_scavengeMutex, 0);
  pthread_cond_init(&m_scavengeCondition, 0);
  m_scavengeThreadActive = true;
  pthread_t thread;
  pthread_create(&thread, 0, runScavengerThread, this);
}

void* TCMalloc_PageHeap::runScavengerThread(void* context)
{
  static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
#if COMPILER(MSVC)
  // Without this, Visual Studio will complain that this method does not return a value.
  return 0;
#endif
}

ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
{
  if (!m_scavengeThreadActive && shouldContinueScavenging())
    pthread_cond_signal(&m_scavengeCondition);
}

#else // !HAVE(DISPATCH_H)

void TCMalloc_PageHeap::initializeScavenger()
{
  m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocScavenger", NULL);
  m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
  dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeTimerDelayInSeconds * NSEC_PER_SEC);
  dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeTimerDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
  dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
  m_scavengingScheduled = false;
}

ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
{
  if (!m_scavengingScheduled && shouldContinueScavenging()) {
    m_scavengingScheduled = true;
    dispatch_resume(m_scavengeTimer);
  }
}

#endif

void TCMalloc_PageHeap::scavenge()
{
  // If we had to commit memory in the last 5 seconds, we don't have enough free committed pages
  // for the current allocation load, so hold off on releasing memory back to the system.
  if (pages_committed_since_last_scavenge_ > 0) {
    pages_committed_since_last_scavenge_ = 0;
    return;
  }
  Length pagesDecommitted = 0;
  for (int i = kMaxPages; i >= 0; i--) {
    SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
    if (!DLL_IsEmpty(&slist->normal)) {
      // Release the last span on the normal portion of this list
      Span* s = slist->normal.prev;
      // Only decommit up to 1/kMaxScavengeAmountFactor of the free committed pages in one pass.
1525 if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
1526 continue;
1527 DLL_Remove(s);
1528 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1529 static_cast<size_t>(s->length << kPageShift));
1530 if (!s->decommitted) {
1531 pagesDecommitted += s->length;
1532 s->decommitted = true;
1533 }
1534 DLL_Prepend(&slist->returned, s);
1535 // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
1536 if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
1537 break;
1538 }
1539 }
1540 pages_committed_since_last_scavenge_ = 0;
1541 ASSERT(free_committed_pages_ >= pagesDecommitted);
1542 free_committed_pages_ -= pagesDecommitted;
1543 }

ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const
{
    return free_committed_pages_ > kMinimumFreeCommittedPageCount;
}

#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

inline Span* TCMalloc_PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = NULL;
    bool released = false;
    if (!DLL_IsEmpty(&free_[s].normal)) {
      // Found normal span
      ll = &free_[s].normal;
    } else if (!DLL_IsEmpty(&free_[s].returned)) {
      // Found returned span; reallocate it
      ll = &free_[s].returned;
      released = true;
    } else {
      // Keep looking in larger classes
      continue;
    }

    Span* result = ll->next;
    Carve(result, n, released);
    if (result->decommitted) {
      TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
      result->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      pages_committed_since_last_scavenge_ += n;
#endif
    }
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    else {
      // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
      // free committed pages count.
      ASSERT(free_committed_pages_ >= n);
      free_committed_pages_ -= n;
    }
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    ASSERT(Check());
    free_pages_ -= n;
    return result;
  }

  Span* result = AllocLarge(n);
  if (result != NULL) {
    ASSERT_SPAN_COMMITTED(result);
    return result;
  }

  // Grow the heap and try again
  if (!GrowHeap(n)) {
    ASSERT(Check());
    return NULL;
  }

  return AllocLarge(n);
}
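
// Note on the search order above: the loop is a segregated first-fit over the
// exact-size lists free_[n..kMaxPages-1]; only requests that fall through get
// the O(number-of-spans) best-fit scan in AllocLarge(). Within a size class,
// the "normal" (committed) list is preferred over the "returned" (decommitted)
// list, which avoids a TCMalloc_SystemCommit call whenever a committed span of
// a suitable size is available.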

Span* TCMalloc_PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
  bool from_released = false;
  Span *best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = false;
      }
    }
  }

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = true;
      }
    }
  }

  if (best != NULL) {
    Carve(best, n, from_released);
    if (best->decommitted) {
      TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
      best->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      pages_committed_since_last_scavenge_ += n;
#endif
    }
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    else {
      // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
      // free committed pages count.
      ASSERT(free_committed_pages_ >= n);
      free_committed_pages_ -= n;
    }
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    ASSERT(Check());
    free_pages_ -= n;
    return best;
  }
  return NULL;
}
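
// Address-ordered best-fit, by example (hypothetical spans, for illustration):
// for n == 10 with free large spans {start 700, length 12},
// {start 300, length 12} and {start 500, length 64}, both 12-page spans beat
// the 64-page span on size, and the tie is broken toward the lower start
// address, so {start 300, length 12} wins. Preferring low addresses keeps
// allocations packed toward one end of the address space, which tends to
// reduce long-term fragmentation.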

Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(!span->free);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const Length extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
  span->length = n;

  return leftover;
}

static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
{
    destination->decommitted = source->decommitted;
}

inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
  ASSERT(n > 0);
  DLL_Remove(span);
  span->free = 0;
  Event(span, 'A', n);

  const int extra = static_cast<int>(span->length - n);
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->free = 1;
    propagateDecommittedState(leftover, span);
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // Place leftover span on appropriate free list
    SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
    Span* dst = released ? &listpair->returned : &listpair->normal;
    DLL_Prepend(dst, leftover);

    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
}
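
// Both Split() and Carve() only refresh pagemap entries at span boundaries:
// the coalescing code in Delete() consults just GetDescriptor(p - 1) and
// GetDescriptor(p + n), so interior pages of a free span are allowed to keep
// stale descriptors. RecordSpan() (defined earlier in this file) covers the
// leftover's first and last pages, and the explicit
// pagemap_.set(span->start + n - 1, span) re-marks the shrunk span's new last
// page.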

static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
    if (destination->decommitted && !other->decommitted) {
        TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
                               static_cast<size_t>(other->length << kPageShift));
    } else if (other->decommitted && !destination->decommitted) {
        TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
                               static_cast<size_t>(destination->length << kPageShift));
        destination->decommitted = true;
    }
}

inline void TCMalloc_PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(!span->free);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  span->sizeclass = 0;
#ifndef NO_TCMALLOC_SAMPLES
  span->sample = 0;
#endif

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Track the total size of the neighboring free spans that are committed.
  Length neighboringCommittedSpansLength = 0;
#endif
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->free) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    if (!prev->decommitted)
        neighboringCommittedSpansLength += len;
#endif
    mergeDecommittedStates(span, prev);
    DLL_Remove(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->free) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    if (!next->decommitted)
        neighboringCommittedSpansLength += len;
#endif
    mergeDecommittedStates(span, next);
    DLL_Remove(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  Event(span, 'D', span->length);
  span->free = 1;
  if (span->decommitted) {
    if (span->length < kMaxPages)
      DLL_Prepend(&free_[span->length].returned, span);
    else
      DLL_Prepend(&large_.returned, span);
  } else {
    if (span->length < kMaxPages)
      DLL_Prepend(&free_[span->length].normal, span);
    else
      DLL_Prepend(&large_.normal, span);
  }
  free_pages_ += n;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  if (span->decommitted) {
    // If the merged span is decommitted, that means we decommitted any neighboring spans that were
    // committed. Update the free committed pages count.
    free_committed_pages_ -= neighboringCommittedSpansLength;
  } else {
    // If the merged span remains committed, add the deleted span's size to the free committed pages count.
    free_committed_pages_ += n;
  }

  // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
  signalScavenger();
#else
  IncrementalScavenge(n);
#endif

  ASSERT(Check());
}
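
// Coalescing, by example (hypothetical page numbers): freeing a 4-page span
// covering pages [100, 104) while [96, 100) and [104, 110) are both free
// yields one 14-page span covering [96, 110). Only GetDescriptor(99) and
// GetDescriptor(104) are consulted, which is why the boundary pagemap entries
// must always be accurate even though interior entries may go stale.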

#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  With 4K pages, this comes to 16MB of memory.
  static const size_t kDefaultReleaseDelay = 1 << 12;

  // Find index of free list to scavenge
  size_t index = scavenge_index_ + 1;
  for (size_t i = 0; i < kMaxPages+1; i++) {
    if (index > kMaxPages) index = 0;
    SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
    if (!DLL_IsEmpty(&slist->normal)) {
      // Release the last span on the normal portion of this list
      Span* s = slist->normal.prev;
      DLL_Remove(s);
      TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                             static_cast<size_t>(s->length << kPageShift));
      s->decommitted = true;
      DLL_Prepend(&slist->returned, s);

      scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));

      if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
        scavenge_index_ = index - 1;
      else
        scavenge_index_ = index;
      return;
    }
    index++;
  }

  // Nothing to scavenge, delay for a while
  scavenge_counter_ = kDefaultReleaseDelay;
}
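
// Rough pacing example for the counter logic above (illustrative numbers):
// scavenge_counter_ counts pages freed through Delete(), so roughly every
// kDefaultReleaseDelay (4096) freed pages -- 16MB with 4K pages -- one span is
// released to the system. The max/min clamp keeps the next delay within
// [64, kDefaultReleaseDelay] pages, scavenging somewhat more eagerly when
// free_pages_ is large.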
#endif

void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(!span->free);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = static_cast<unsigned int>(sc);
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}

#ifdef WTF_CHANGES
size_t TCMalloc_PageHeap::ReturnedBytes() const {
    size_t result = 0;
    for (unsigned s = 0; s < kMaxPages; s++) {
        const int r_length = DLL_Length(&free_[s].returned);
        unsigned r_pages = s * r_length;
        result += r_pages << kPageShift;
    }

    for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
        result += s->length << kPageShift;
    return result;
}
#endif

#ifndef WTF_CHANGES
static double PagesToMB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}

void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
      nonempty_sizes++;
    }
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n",
              nonempty_sizes, PagesToMB(free_pages_));
  out->printf("------------------------------------------------\n");
  uint64_t total_normal = 0;
  uint64_t total_returned = 0;
  for (int s = 0; s < kMaxPages; s++) {
    const int n_length = DLL_Length(&free_[s].normal);
    const int r_length = DLL_Length(&free_[s].returned);
    if (n_length + r_length > 0) {
      uint64_t n_pages = s * n_length;
      uint64_t r_pages = s * r_length;
      total_normal += n_pages;
      total_returned += r_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
                  "; unmapped: %6.1f MB; %6.1f MB cum\n",
                  s,
                  (n_length + r_length),
                  PagesToMB(n_pages + r_pages),
                  PagesToMB(total_normal + total_returned),
                  PagesToMB(r_pages),
                  PagesToMB(total_returned));
    }
  }

  uint64_t n_pages = 0;
  uint64_t r_pages = 0;
  int n_spans = 0;
  int r_spans = 0;
  out->printf("Normal large spans:\n");
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    n_pages += s->length;
    n_spans++;
  }
  out->printf("Unmapped large spans:\n");
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    r_pages += s->length;
    r_spans++;
  }
  total_normal += n_pages;
  total_returned += r_pages;
  out->printf(">255   large * %6u spans ~ %6.1f MB; %6.1f MB cum"
              "; unmapped: %6.1f MB; %6.1f MB cum\n",
              (n_spans + r_spans),
              PagesToMB(n_pages + r_pages),
              PagesToMB(total_normal + total_returned),
              PagesToMB(r_pages),
              PagesToMB(total_returned));
}
#endif

bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  pages_committed_since_last_scavenge_ += ask;
#endif

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.

  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);
  }
  return true;
}

#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
  return true;
}
#else
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
#endif

static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Remove(s);
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));
  }
}

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
  ASSERT(Check());
}

//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
 private:
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length

 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Return current length of list
  int length() const {
    return length_;
  }

  // Is list empty?
  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);
  }

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;
  }

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
          finder.visit(nextObject);
  }
#endif
};
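
// The free list is intrusive: each free object's first word points at the
// next free object, so the list costs no memory beyond the objects themselves
// (and is one reason every size class must be at least pointer-sized). A
// minimal sketch of the SLL_* helpers this class relies on, paraphrased
// rather than copied from their definitions earlier in this file:
//
//   static inline void* SLL_Next(void* t) { return *reinterpret_cast<void**>(t); }
//   static inline void SLL_Push(void** list, void* element) {
//       *reinterpret_cast<void**>(element) = *list;  // element->next = old head
//       *list = element;                             // head = element
//   }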

//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;
#if COMPILER(MSVC)
  typedef DWORD ThreadIdentifier;
#else
  typedef pthread_t ThreadIdentifier;
#endif

  size_t           size_;                  // Combined size of data
  ThreadIdentifier tid_;                   // Which thread owns it
  bool             in_setspecific_;        // Called pthread_setspecific?
  FreeList         list_[kNumClasses];     // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t rnd_;                   // Cheap random number generator
  size_t   bytes_until_sample_;    // Bytes until we sample next

  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);
 public:
  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);
  void Cleanup();

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Record allocation of "k" bytes. Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void InitModule();
  static void InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void DeleteCache(TCMalloc_ThreadCache* heap);
  static void BecomeIdle();
  static void RecomputeThreadCacheSize();

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
          list_[sizeClass].enumerateFreeObjects(finder, reader);
  }
#endif
};

//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist.  N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
  size_t length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Returns the number of free objects in the transfer cache.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
  {
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);

    Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
    Span* remoteSpan = nonempty_.next;

    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);
    }
  }
#endif

 private:
  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return.  Fetches
  // from pageheap if cache is empty.  Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.
  void Populate();

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry.  If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  // no space.
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal TCEntry slot from.  In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache.  If force is true it will release objects to
  // spans if that allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently, which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);

  // This lock protects all the data members.  cached_entries and cache_size_
  // may be looked at without holding the lock.
  SpinLock lock_;

  // We keep linked lists of empty and non-empty spans.
  size_t size_class_;     // My size class
  Span   empty_;          // Dummy header for list of empty spans
  Span   nonempty_;       // Dummy header for list of non-empty spans
  size_t counter_;        // Number of free objects in cache entry

  // Here we reserve space for TCEntry cache slots.  Since one size class can
  // end up getting all the TCEntries quota in the system we just preallocate
  // sufficient number of entries here.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of currently used cached entries in tc_slots_.  This variable is
  // updated under a lock but can be read without one.
  int32_t used_slots_;
  // The current number of slots for this size class.  This is an
  // adaptive value that is increased if there is lots of traffic
  // on a given size class.
  int32_t cache_size_;
};

// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
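
// The pad_ expression rounds the object size up to the next multiple of 64
// bytes and adds nothing when it is already a multiple. For example (the size
// is illustrative, not the real one), a 232-byte base class would get
// (64 - 232 % 64) % 64 == 24 bytes of padding. Keeping each element of the
// central_cache array on its own 64-byte cache lines prevents false sharing
// between the spin locks of adjacent size classes.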

//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];

// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
static bool phinited = false;

// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
typedef union {
    void* m_memory;
    TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;

static inline TCMalloc_PageHeap* getPageHeap()
{
    PageHeapUnion u = { &pageheap_memory[0] };
    return u.m_pageHeap;
}

#define pageheap getPageHeap()
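
// The page heap thus lives in raw static storage and is only brought to life
// when InitModule() calls pageheap->init(); no global constructor runs for
// it, so malloc can be used before static initializers have executed.
// Declaring the buffer as an array of void* keeps it pointer-aligned, which
// this code assumes is sufficient alignment for TCMalloc_PageHeap.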

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

#if !HAVE(DISPATCH_H)
#if OS(WINDOWS)
static void sleep(unsigned seconds)
{
    ::Sleep(seconds * 1000);
}
#endif

void TCMalloc_PageHeap::scavengerThread()
{
#if HAVE(PTHREAD_SETNAME_NP)
  pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
#endif

  while (1) {
      if (!shouldContinueScavenging()) {
          pthread_mutex_lock(&m_scavengeMutex);
          m_scavengeThreadActive = false;
          // Block until there are enough freed pages to release back to the system.
          pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
          m_scavengeThreadActive = true;
          pthread_mutex_unlock(&m_scavengeMutex);
      }
      sleep(kScavengeTimerDelayInSeconds);
      {
          SpinLockHolder h(&pageheap_lock);
          pageheap->scavenge();
      }
  }
}

#else

void TCMalloc_PageHeap::periodicScavenge()
{
    {
        SpinLockHolder h(&pageheap_lock);
        pageheap->scavenge();
    }

    if (!shouldContinueScavenging()) {
        m_scavengingScheduled = false;
        dispatch_suspend(m_scavengeTimer);
    }
}
#endif // HAVE(DISPATCH_H)

#endif

// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
#ifdef HAVE_TLS
static __thread TCMalloc_ThreadCache *threadlocal_heap;
#endif
// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_key_create().
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
static pthread_key_t heap_key;
#if COMPILER(MSVC)
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
#endif

static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // Still do pthread_setspecific even when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);
#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}

// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;

//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}

void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}

ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    unsigned got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}

ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}

bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache.
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}


namespace {
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}
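
// LockInverter enforces "hold at most one size-class lock at a time" by
// construction: entering ShrinkCache() with class A's lock held, the inverter
// releases A before taking B, and its destructor undoes that in reverse on
// every exit path. Because there is no global ordering between per-class
// locks, the alternative -- acquiring B while still holding A -- could
// deadlock against a thread performing the mirror-image eviction from B to A.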

bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}

void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}

void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}
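
// The transfer cache is a fast path for whole batches: when a thread cache
// hands back exactly num_objects_to_move[size_class_] objects, the
// already-linked chain is parked in a TCEntry as an opaque (head, tail) pair,
// and a later RemoveRange() for the same batch size pops it in O(1), with no
// per-object walk and no span bookkeeping. Only transfers of other sizes fall
// through to the object-by-object ReleaseListToSpans() / FetchFromSpans()
// paths.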


void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}

void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  ASSERT_SPAN_COMMITTED(span);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}

// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT_SPAN_COMMITTED(span);
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  char* nptr;
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
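
// The carving loop above threads the free list through the span's own memory:
// "tail" starts as &span->objects, each iteration stores the current object's
// address through it and then retargets tail at that object's first word. For
// a hypothetical two-object span at address 0x1000 with 16-byte objects, the
// result is span->objects == 0x1000, *(void**)0x1000 == 0x1010, and
// *(void**)0x1010 == NULL -- exactly the intrusive layout the thread-cache
// and central free lists consume.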

//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}

void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}

void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}

ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}

inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}

// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}

// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}

// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //CycleTimer ct;
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
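
// Low-water mark, by example (made-up numbers): if a class's free list never
// dropped below 40 objects since the last Scavenge(), the thread never needed
// those 40, so 20 of them are returned to the central cache. A hot list whose
// low-water mark is 0 gives nothing back, which is what keeps scavenging from
// hurting steady-state allocation throughput.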

void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
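
// The rnd_ update above is a Galois-style 32-bit LFSR step: shift left by one
// and, when the old top bit was set, XOR in kPoly, the low-order terms of the
// primitive polynomial x^32 + x^22 + x^2 + x + 1. The arithmetic right shift
// of the sign bit expands to an all-ones or all-zero mask, avoiding a branch.
// A cheap generator is adequate here; it only spreads the sampling points,
// and using a prime sample_period keeps "rnd_ % sample_period" reasonably
// uniform.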

void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
#ifdef WTF_CHANGES
    InitTSD();
#endif
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    pageheap->init();
    phinited = true;
#if defined(WTF_CHANGES) && OS(DARWIN)
    FastMallocZone::init();
#endif
  }
}

inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}

inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}

inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}

// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}

void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread.  Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}

TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited) {
      me = 0;
    } else {
      me = GetCurrentThreadId();
    }
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
  }
  return heap;
}

void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}

void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}

void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();

  threadheap_allocator.Delete(heap);
}

void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}

void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}

// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
};
3120
3121 #ifndef WTF_CHANGES
3122 // Get stats into "r". Also get per-size-class counts if class_count != NULL
3123 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
3124 r->central_bytes = 0;
3125 r->transfer_bytes = 0;
3126 for (int cl = 0; cl < kNumClasses; ++cl) {
3127 const int length = central_cache[cl].length();
3128 const int tc_length = central_cache[cl].tc_length();
3129 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
3130 r->transfer_bytes +=
3131 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
3132 if (class_count) class_count[cl] = length + tc_length;
3133 }
3134
3135 // Add stats from per-thread heaps
3136 r->thread_bytes = 0;
3137 { // scope
3138 SpinLockHolder h(&pageheap_lock);
3139 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3140 r->thread_bytes += h->Size();
3141 if (class_count) {
3142 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3143 class_count[cl] += h->freelist_length(cl);
3144 }
3145 }
3146 }
3147 }
3148
3149 { //scope
3150 SpinLockHolder h(&pageheap_lock);
3151 r->system_bytes = pageheap->SystemBytes();
3152 r->metadata_bytes = metadata_system_bytes;
3153 r->pageheap_bytes = pageheap->FreeBytes();
3154 }
3155 }
3156 #endif

#ifndef WTF_CHANGES
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (size_t cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3" PRIuS " [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}

static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}

static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;             // Slop in case sample grows
    needed_slots += needed_slots/8;  // An extra 12.5% slop
  }

  // Use nothrow new: on failure we want NULL back rather than bad_alloc,
  // so the check below actually means something.
  void** result = new (std::nothrow) void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
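
// Illustrative decoder (a sketch, not part of this file): each sampled
// allocation is stored above as the flat record {1, size, depth,
// pc[0..depth-1]}, and the array ends with a 0 slot, so a consumer could
// walk the result like this:
//
//   void PrintTraces(void** traces) {
//     for (int i = 0; reinterpret_cast<uintptr_t>(traces[i]) == 1; ) {
//       uintptr_t size  = reinterpret_cast<uintptr_t>(traces[i + 1]);
//       uintptr_t depth = reinterpret_cast<uintptr_t>(traces[i + 2]);
//       MESSAGE("sample: %lu bytes, %lu frames\n",
//               static_cast<unsigned long>(size),
//               static_cast<unsigned long>(depth));
//       i += 3 + depth;  // skip the header and the PC slots
//     }
//   }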
#endif

#ifndef WTF_CHANGES

// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level 2 stats only if there is plenty of buffer space;
    // otherwise fall back to the compact level 1 summary.
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      // Subtract every category of free bytes tcmalloc tracks, including
      // the transfer cache, so this agrees with DumpStats()'s
      // "Bytes in use by application".
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.transfer_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);  // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
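
// Hypothetical caller (a sketch): an application linked against tcmalloc
// could query and tune these properties through the MallocExtension
// singleton, e.g.:
//
//   size_t heap_size = 0;
//   if (MallocExtension::instance()->GetNumericProperty(
//           "generic.heap_size", &heap_size)) {
//     MESSAGE("heap is %lu bytes\n", static_cast<unsigned long>(heap_size));
//   }
//   // Cap the combined thread-cache footprint at 16MB.
//   MallocExtension::instance()->SetNumericProperty(
//       "tcmalloc.max_total_thread_cache_bytes", 16 << 20);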
#endif

// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before any static constructors
// make STL allocations, so that all user STL allocations go through
// tcmalloc (which works really well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo supports TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifndef WTF_CHANGES
#ifdef WIN32                    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
#endif
    free(malloc(1));
    TCMalloc_ThreadCache::InitTSD();
    free(malloc(1));
#ifndef WTF_CHANGES
    MallocExtension::Register(new TCMallocImplementation);
#endif
  }

#ifndef WTF_CHANGES
  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
#endif
};

#ifndef WTF_CHANGES
static TCMallocGuard module_enter_exit_hook;
#endif

//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

#ifndef WTF_CHANGES

static Span* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span* span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace* stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
#endif

static inline bool CheckCachedSizeClass(void* ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}

static inline void* CheckedMallocResult(void* result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}

static inline void* SpanToMallocResult(Span* span) {
  ASSERT_SPAN_COMMITTED(span);
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}

#ifdef WTF_CHANGES
template <bool crashOnFailure>
#endif
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

#ifdef WTF_CHANGES
  ASSERT(!isForbidden());
#endif

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
#ifndef WTF_CHANGES
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else
#endif
  if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else {
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }
  if (!ret) {
#ifdef WTF_CHANGES
    if (crashOnFailure) // This branch should be optimized out by the compiler.
      CRASH();
#else
    errno = ENOMEM;
#endif
  }
  return ret;
}

static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
#ifndef NO_TCMALLOC_SAMPLES
    ASSERT(!pageheap->GetDescriptor(p)->sample);
#endif
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
#endif
    pageheap->Delete(span);
  }
}

#ifndef WTF_CHANGES
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
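
// Worked example (illustrative numbers, assuming 4KB pages): a call to
// do_memalign(32768, 20000) allocates pages(20000 + 32768) = 13 pages.
// If the span happens to start at page 5 (address 0x5000), the skip loop
// advances 3 pages to page 8 (address 0x8000, which is 32KB-aligned); the
// 3 leading pages are split off and deleted, and since only
// pages(20000) = 5 pages are needed, the 5-page trailer is also returned
// to the page heap, leaving an aligned 5-page span for the caller.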
#endif

// Helpers for use by exported routines below:

#ifndef WTF_CHANGES
static inline void do_malloc_stats() {
  PrintStats(1);
}
#endif

static inline int do_mallopt(int, int) {
  return 1;   // All mallopt options are ignored; glibc treats a return
              // value of 1 as success.
}

#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Only some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena = static_cast<int>(stats.system_bytes);
  info.fsmblks = static_cast<int>(stats.thread_bytes
                                  + stats.central_bytes
                                  + stats.transfer_bytes);
  info.fordblks = static_cast<int>(stats.pageheap_bytes);
  info.uordblks = static_cast<int>(stats.system_bytes
                                   - stats.thread_bytes
                                   - stats.central_bytes
                                   - stats.transfer_bytes
                                   - stats.pageheap_bytes);

  return info;
}
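
// Sanity check (illustrative): by construction the fields above satisfy
//
//   uordblks + fsmblks + fordblks
//     = (system - thread - central - transfer - pageheap)
//       + (thread + central + transfer) + pageheap
//     = system_bytes = arena
//
// modulo the truncation to "int", so a consumer can treat uordblks as
// application-owned bytes and fsmblks + fordblks as allocator-held free
// bytes.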
#endif

//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

#ifndef WTF_CHANGES
extern "C"
#else
#define do_malloc do_malloc<crashOnFailure>

template <bool crashOnFailure>
void* malloc(size_t);

void* fastMalloc(size_t size)
{
    return malloc<true>(size);
}

TryMallocReturnValue tryFastMalloc(size_t size)
{
    return malloc<false>(size);
}

template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* malloc(size_t size) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size)  // If overflow would occur...
        return 0;
    size += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(size);
    if (!result)
        return 0;

    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
#else
    void* result = do_malloc(size);
#endif

#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(result, size);
#endif
    return result;
}

#ifndef WTF_CHANGES
extern "C"
#endif
void free(void* ptr) {
#ifndef WTF_CHANGES
    MallocHook::InvokeDeleteHook(ptr);
#endif

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (!ptr)
        return;

    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(ptr);
    do_free(header);
#else
    do_free(ptr);
#endif
}
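
// Memory layout under FAST_MALLOC_MATCH_VALIDATION (a sketch of what the
// code above maintains): every block carries a one-word tag in front of
// the pointer handed to the caller, so free() can verify that the block
// came from the matching allocator family.
//
//   | AllocAlignmentInteger tag | user data ...                 |
//   ^ address from do_malloc()  ^ address returned to the caller
//
// free() steps back one AllocAlignmentInteger, checks the tag against
// Internal::AllocTypeMalloc, and passes the original base to do_free().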

#ifndef WTF_CHANGES
extern "C"
#else
template <bool crashOnFailure>
void* calloc(size_t, size_t);

void* fastCalloc(size_t n, size_t elem_size)
{
    return calloc<true>(n, elem_size);
}

TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
{
    return calloc<false>(n, elem_size);
}

template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* calloc(size_t n, size_t elem_size) {
    size_t totalBytes = n * elem_size;

    // Protect against overflow
    if (n > 1 && elem_size && (totalBytes / elem_size) != n)
        return 0;

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes)  // If overflow would occur...
        return 0;

    totalBytes += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(totalBytes);
    if (!result)
        return 0;

    memset(result, 0, totalBytes);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
#else
    void* result = do_malloc(totalBytes);
    if (result != NULL) {
        memset(result, 0, totalBytes);
    }
#endif

#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(result, totalBytes);
#endif
    return result;
}
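
// Worked example of the overflow guard (illustrative, 32-bit size_t):
// calloc(65536, 65537) computes totalBytes = 65536 * 65537, which wraps
// modulo 2^32 to 65536.  The check then sees totalBytes / elem_size =
// 65536 / 65537 = 0 != 65536 = n, so the call safely returns 0 instead
// of handing back a 64KB block for a ~4GB request.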

// Since cfree isn't used anywhere, we don't compile it in.
#ifndef WTF_CHANGES
extern "C" void cfree(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
#endif

#ifndef WTF_CHANGES
extern "C"
#else
template <bool crashOnFailure>
void* realloc(void*, size_t);

void* fastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<true>(old_ptr, new_size);
}

TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<false>(old_ptr, new_size);
}

template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* realloc(void* old_ptr, size_t new_size) {
    if (old_ptr == NULL) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        void* result = malloc(new_size);
#else
        void* result = do_malloc(new_size);
#ifndef WTF_CHANGES
        MallocHook::InvokeNewHook(result, new_size);
#endif
#endif
        return result;
    }
    if (new_size == 0) {
#ifndef WTF_CHANGES
        MallocHook::InvokeDeleteHook(old_ptr);
#endif
        free(old_ptr);
        return NULL;
    }

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size)  // If overflow would occur...
        return 0;
    new_size += sizeof(AllocAlignmentInteger);
    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(old_ptr);
    old_ptr = header;
#endif

    // Get the size of the old entry
    const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
    size_t cl = pageheap->GetSizeClassIfCached(p);
    Span* span = NULL;
    size_t old_size;
    if (cl == 0) {
        span = pageheap->GetDescriptor(p);
        cl = span->sizeclass;
        pageheap->CacheSizeClass(p, cl);
    }
    if (cl != 0) {
        old_size = ByteSizeForClass(cl);
    } else {
        ASSERT(span != NULL);
        old_size = span->length << kPageShift;
    }

    // Reallocate if the new size is larger than the old size,
    // or if the new size is significantly smaller than the old size.
    if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
        // Need to reallocate
        void* new_ptr = do_malloc(new_size);
        if (new_ptr == NULL) {
            return NULL;
        }
#ifndef WTF_CHANGES
        MallocHook::InvokeNewHook(new_ptr, new_size);
#endif
        memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
#ifndef WTF_CHANGES
        MallocHook::InvokeDeleteHook(old_ptr);
#endif
        // We could use a variant of do_free() that leverages the fact
        // that we already know the sizeclass of old_ptr.  The benefit
        // would be small, so don't bother.
        do_free(old_ptr);
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
#endif
        return new_ptr;
    } else {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
#endif
        return old_ptr;
    }
}
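
// Illustrative behavior of the in-place test above (the size-class values
// are examples, not guarantees): suppose a block lives in a 4096-byte
// class.  realloc(p, 3500) keeps the block where it is, because 3500 is
// not larger than 4096 and AllocationSize(3500) == 4096 is not smaller
// than the old size.  But realloc(p, 100) copies into a fresh small
// block, since letting 100 live bytes pin a 4096-byte allocation would
// waste most of the memory.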

#ifdef WTF_CHANGES
#undef do_malloc
#else

static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;

static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
    } else {  // allocation success
      return p;
    }
#endif
  }
}
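
// Hypothetical interaction with this retry loop (a sketch): an application
// can install a new_handler that releases an emergency reserve; cpp_alloc
// will invoke it and retry until the allocation succeeds or the handler
// gives up by uninstalling itself.
//
//   static char* emergencyReserve = new char[1 << 20];
//   static void releaseReserve() {
//     delete[] emergencyReserve;
//     emergencyReserve = 0;
//     std::set_new_handler(0);  // out of tricks; let the next failure throw
//   }
//   // In main(): std::set_new_handler(releaseReserve);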

void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" void* memalign(size_t align, size_t size) __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
    __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
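
// Examples of the posix_memalign() precondition (illustrative, assuming a
// 64-bit target where sizeof(void*) == 8):
//   align = 0   -> EINVAL (zero)
//   align = 24  -> EINVAL (multiple of sizeof(void*) but not a power of two)
//   align = 4   -> EINVAL (power of two but not a multiple of sizeof(void*))
//   align = 64  -> accepted; the request is forwarded to do_memalign()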

static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
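
// Rounding example for pvalloc() (illustrative, pagesize = 4096):
//   size = 5000 -> (5000 + 4095) & ~4095 = 8192, i.e. two full pages.
// The mask trick works because pagesize is a power of two, so
// ~(pagesize - 1) clears exactly the low-order bits that make up the
// intra-page offset.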

extern "C" void malloc_stats(void) {
  do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
  return do_mallinfo();
}
#endif

//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa).  Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------

#if defined(__GLIBC__)
extern "C" {
#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
void* __libc_malloc(size_t size)              ALIAS("malloc");
void  __libc_free(void* ptr)                  ALIAS("free");
void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
void  __libc_cfree(void* ptr)                 ALIAS("cfree");
void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
void* __libc_valloc(size_t size)              ALIAS("valloc");
void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not COMPILER(GCC) */
  // Portable wrappers
void* __libc_malloc(size_t size)              { return malloc(size);       }
void  __libc_free(void* ptr)                  { free(ptr);                 }
void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
void* __libc_valloc(size_t size)              { return valloc(size);       }
void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
int __posix_memalign(void** r, size_t a, size_t s) {
  return posix_memalign(r, a, s);
}
# endif  /* COMPILER(GCC) */
}
#endif   /* __GLIBC__ */

// On Linux we additionally override __libc_memalign through the glibc
// __memalign_hook.  Some glibc versions have a bug that causes them to
// (very rarely) allocate with __libc_memalign() yet deallocate with
// free(), and the definitions above don't catch that path.
// This function is an exception to the rule of calling MallocHook methods
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
    __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;

#endif

#if defined(WTF_CHANGES) && OS(DARWIN)

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
    }
};

class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* spanStart = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(spanStart);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};

class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;

    HashSet<void*> m_seenPointers;
    Vector<Span*> m_coalescedSpans;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    ~PageMapMemoryUsageRecorder()
    {
        ASSERT(!m_coalescedSpans.size());
    }

    void recordPendingRegions()
    {
        // Bail out if nothing is pending, e.g. when the page map held no spans.
        if (!m_coalescedSpans.size())
            return;

        Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
        ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);

        // Mark the memory region the spans represent as a candidate for containing pointers
        if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            m_coalescedSpans.clear();
            return;
        }

        Vector<vm_range_t, 1024> allocatedPointers;
        for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
            Span* theSpan = m_coalescedSpans[i];
            if (theSpan->free)
                continue;

            vm_address_t spanStartAddress = theSpan->start << kPageShift;
            vm_size_t spanSizeInBytes = theSpan->length * kPageSize;

            if (!theSpan->sizeclass) {
                // If it's an allocated large object span, mark it as in use
                if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
                    allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
            } else {
                const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);

                // Mark each allocated small object within the span as in use
                const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
                for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
                    if (!m_freeObjectFinder.isFreeObject(object))
                        allocatedPointers.append((vm_range_t){object, objectSize});
                }
            }
        }

        (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());

        m_coalescedSpans.clear();
    }

    int visit(void* ptr)
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (!span->start)
            return 1;

        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        if (!m_coalescedSpans.size()) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
        vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;

        // If the new span is adjacent to the previous span, do nothing for now.
        vm_address_t spanStartAddress = span->start << kPageShift;
        if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        // New span is not adjacent to previous span, so record the spans coalesced so far.
        recordPendingRegions();
        m_coalescedSpans.append(span);

        return span->length;
    }
};

class AdminRegionRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;

    Vector<vm_range_t, 1024> m_pendingRegions;

public:
    AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
    { }

    void recordRegion(vm_address_t ptr, size_t size)
    {
        if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
            m_pendingRegions.append((vm_range_t){ ptr, size });
    }

    void visit(void* ptr, size_t size)
    {
        recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
    }

    void recordPendingRegions()
    {
        if (m_pendingRegions.size()) {
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
            m_pendingRegions.clear();
        }
    }

    ~AdminRegionRecorder()
    {
        ASSERT(!m_pendingRegions.size());
    }
};

kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    InitSizeClasses();

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visitValues(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visitValues(usageRecorder, memoryReader);
    usageRecorder.recordPendingRegions();

    AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
    pageMap->visitAllocations(adminRegionRecorder, memoryReader);

    PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
    PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);

    spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
    pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);

    adminRegionRecorder.recordPendingRegions();

    return 0;
}
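
// Hypothetical driver for the enumerator above (a sketch of what tools such
// as leaks or malloc_history do through the zone introspection interface);
// readMemory and printRange are illustrative names, not part of this file:
//
//   static kern_return_t readMemory(task_t, vm_address_t remoteAddress, vm_size_t, void** localMemory)
//   {
//       *localMemory = reinterpret_cast<void*>(remoteAddress);  // in-process: no copy needed
//       return KERN_SUCCESS;
//   }
//   static void printRange(task_t, void*, unsigned type, vm_range_t* ranges, unsigned count)
//   {
//       for (unsigned i = 0; i < count; ++i)
//           malloc_printf("type %u: %p (%lu bytes)\n", type,
//                         reinterpret_cast<void*>(ranges[i].address),
//                         static_cast<unsigned long>(ranges[i].size));
//   }
//   // FastMallocZone::enumerate(mach_task_self(), 0, MALLOC_PTR_IN_USE_RANGE_TYPE,
//   //                           zoneAddress, readMemory, printRange);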

size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone.  When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}

#undef malloc
#undef free
#undef realloc
#undef calloc

extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !OS(IPHONE_OS)
    , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
#endif

    };
}

FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
    , m_spanAllocator(spanAllocator)
    , m_pageHeapAllocator(pageHeapAllocator)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.version = 4;
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}

void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}

#endif

#ifdef WTF_CHANGES
void releaseFastMallocFreeMemory()
{
    // Flush free pages in the current thread cache back to the page heap.
    // The low-watermark mechanism in Scavenge() keeps it from returning
    // everything on a single pass, so we call it twice: the second pass
    // flushes whatever the first one left behind.
    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
        threadCache->Scavenge();
        threadCache->Scavenge();
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}

FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics;
    {
        SpinLockHolder lockHolder(&pageheap_lock);
        statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
        statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
        statistics.returnedSize = pageheap->ReturnedBytes();
        statistics.freeSizeInCaches = 0;
        for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache; threadCache = threadCache->next_)
            statistics.freeSizeInCaches += threadCache->Size();
    }
    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
        const int length = central_cache[cl].length();
        const int tc_length = central_cache[cl].tc_length();
        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
    }
    return statistics;
}

} // namespace WTF
#endif

#endif // FORCE_SYSTEM_MALLOC