// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process/memory.h"

// AddressSanitizer handles heap corruption, and on 64-bit Macs, the malloc
// system automatically abort()s on heap corruption.
#if !defined(ADDRESS_SANITIZER) && ARCH_CPU_32_BITS
#define HANDLE_MEMORY_CORRUPTION_MANUALLY
#endif

#include <CoreFoundation/CoreFoundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>

#include <algorithm>
#include <new>

#include "base/debug/debugger.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/mac/mach_logging.h"
#include "base/scoped_clear_errno.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
#include <dlfcn.h>
#include <mach-o/nlist.h>

#include "base/threading/thread_local.h"
#include "third_party/mach_override/mach_override.h"
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)

namespace base {

// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
// on 64-bit Macs.
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
namespace {

// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  DLOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}

typedef void (*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is
// declared as __private_extern__ and cannot be dlsym()ed. Instead, use
// nlist() to get it.
malloc_error_break_t LookUpMallocErrorBreak() {
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
}
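
// Worked example of the offset arithmetic above (hypothetical numbers): if
// nlist() reports _malloc at image offset 0x1000 and _malloc_error_break at
// 0x2400, and &malloc is 0x9d001000 at runtime, then the image slide is
// 0x9d001000 - 0x1000 = 0x9d000000, and malloc_error_break resolves to
// 0x9d000000 + 0x2400 = 0x9d002400.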

// Combines ThreadLocalBoolean with AutoReset. It would be convenient
// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
// would require allocating some storage for the bool.
class ThreadLocalBooleanAutoReset {
 public:
  ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
      : scoped_tlb_(tlb),
        original_value_(tlb->Get()) {
    scoped_tlb_->Set(new_value);
  }
  ~ThreadLocalBooleanAutoReset() {
    scoped_tlb_->Set(original_value_);
  }

 private:
  ThreadLocalBoolean* scoped_tlb_;
  bool original_value_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
};

base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_alloc = LAZY_INSTANCE_INITIALIZER;

// NOTE(shess): This is called when the malloc library noticed that the heap
// is fubar. Avoid calls which will re-enter the malloc library.
void CrMallocErrorBreak() {
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer.
  if (errno == ENOMEM)
    return;

  // The malloc library attempts to log to ASL (syslog) before calling this
  // code, which fails accessing a Unix-domain socket when sandboxed. The
  // failed socket results in writing to a -1 fd, leaving EBADF in errno. If
  // UncheckedMalloc() is on the stack, for large allocations (15k and up)
  // only an OOM failure leads here. Smaller allocations could also arrive
  // here due to freelist corruption, but there is no way to distinguish that
  // from OOM at this point.
  //
  // NOTE(shess): I hypothesize that the EPERM case in 10.9 has the same root
  // cause as EBADF. Unfortunately, 10.9's opensource releases don't include
  // malloc source code at this time.
  // <http://crbug.com/312234>
  if ((errno == EBADF || errno == EPERM) && g_unchecked_alloc.Get().Get())
    return;

  // A unit test checks this error message, so it needs to be in release
  // builds.
  char buf[1024] =
      "Terminating process due to a potential for future heap corruption: "
      "errno=";
  char errnobuf[] = {
    '0' + ((errno / 100) % 10),
    '0' + ((errno / 10) % 10),
    '0' + (errno % 10),
    '\000'
  };
  COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
  strlcat(buf, errnobuf, sizeof(buf));
  RAW_LOG(ERROR, buf);

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library). Max documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}

}  // namespace
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
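
// Usage sketch (hypothetical call site, assuming a 32-bit non-ASan build):
// call EnableTerminationOnHeapCorruption() once, early in process startup;
// later calls are no-ops because the override is only installed while
// g_original_malloc_error_break is still null. After a crash, the faulting
// address in the dump encodes errno, e.g. a write to 0x22 means errno was
// 34 (ERANGE on Darwin).
//
//   int main(int argc, char* argv[]) {
//     base::EnableTerminationOnHeapCorruption();
//     ...
//   }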

void EnableTerminationOnHeapCorruption() {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  // Only override once, otherwise CrMallocErrorBreak() will recurse
  // to itself.
  if (g_original_malloc_error_break)
    return;

  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
}

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

#if !defined(ADDRESS_SANITIZER)

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if
// necessary, and returns (in the out parameters) a region of memory (if any)
// to be re-protected when modifications are complete. This approach assumes
// that there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See 10.9.2
  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
  mach_port_deallocate(mach_task_self(), unused);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone
  // rather than the actual smaller size if the passed-in zone is not
  // version 8.
  CHECK(*reprotection_start <=
            reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone,
                     void* ptr) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  g_old_free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}
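
// Worked example of the alignment test above (hypothetical values):
// alignment == 16 passes both checks (16 >= sizeof(void*); 16 & 15 == 0), so
// a NULL result is treated as out-of-memory. alignment == 24 fails the
// power-of-two check (24 & 23 == 16, nonzero), so posix_memalign would have
// returned EINVAL rather than ENOMEM and no crash is forced.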

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
                               void* ptr) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  g_old_free_purgeable(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  ScopedClearErrno clear_errno;
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}

#if !defined(ADDRESS_SANITIZER)

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanYosemite_DontCallThis();
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion() ||
             base::mac::IsOSMountainLion() ||
             base::mac::IsOSMavericks() ||
             base::mac::IsOSYosemite()) {
    ChromeCFAllocatorLions* our_allocator =
        const_cast<ChromeCFAllocatorLions*>(
            reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

}  // namespace

bool UncheckedMalloc(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  if (g_old_malloc) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
    *result = g_old_malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}
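
// Usage sketch (hypothetical caller): code that can recover from allocation
// failure uses the Unchecked variants so a NULL result is reported instead
// of crashing through the OOM killer hooks:
//
//   void* buf = NULL;
//   if (base::UncheckedMalloc(payload_size, &buf)) {
//     // ... use buf, then free(buf) ...
//   } else {
//     // ... degrade gracefully; no BreakDebugger() crash occurred ...
//   }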

bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  if (g_old_calloc) {
#if defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // defined(HANDLE_MEMORY_CORRUPTION_MANUALLY)
    *result = g_old_calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

void* UncheckedMalloc(size_t size) {
  void* address;
  return UncheckedMalloc(size, &address) ? address : NULL;
}

void* UncheckedCalloc(size_t num_items, size_t size) {
  void* address;
  return UncheckedCalloc(num_items, size, &address) ? address : NULL;
}

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }
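
  // Note (assumption, based on the version check above): the memalign entry
  // point appeared in malloc zone layout version 5, so it is hooked only
  // when the zone reports that version or later; the five classic entry
  // points exist in every zone version and are hooked unconditionally.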

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that a failure is likely not due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must
  // always be prepared to receive less memory than was requested, even when
  // memory pressure is not a concern. Finally, the only public interface to
  // batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard
  // of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);
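
  // Illustrative effect of the new_handler above (hypothetical allocation):
  // once installed, a failed allocation through operator new invokes
  // oom_killer_new() and crashes via BreakDebugger() instead of throwing
  // std::bad_alloc:
  //
  //   int* huge = new int[kTooManyToAllocate];  // kTooManyToAllocate is
  //                                             // hypothetical; on failure,
  //                                             // oom_killer_new() runs.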

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in "
                     "termination. http://crbug.com/45650";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

}  // namespace base