// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/utils/allocation.h"

#include <stdlib.h>  // For free, malloc.

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/base/sanitizer/lsan-virtual-address-space.h"
#include "src/base/vector.h"
#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/sandbox/sandbox.h"
#include "src/utils/memcopy.h"

#if V8_LIBC_BIONIC
#include <malloc.h>
#endif

namespace v8 {
namespace internal {

namespace {

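// Allocates |size| bytes with the requested |alignment| using the
// platform-specific primitive: _aligned_malloc on Windows, memalign on
// Bionic, SbMemoryAllocateAligned on Starboard, and posix_memalign elsewhere.
// Returns nullptr on failure; the result must be released with AlignedFree.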
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#elif V8_OS_STARBOARD
  ptr = SbMemoryAllocateAligned(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

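// Lazily selects the process-wide page allocator: the one supplied by the
// embedder's v8::Platform if present, otherwise a leaky default
// base::PageAllocator. Under LeakSanitizer the chosen allocator is wrapped in
// an LsanPageAllocator so page allocations stay visible to LSAN.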
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageAllocatorInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
  return GetPageAllocatorInitializer()->page_allocator();
}

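// Returns the process-wide, leaky VirtualAddressSpace. As with the page
// allocator above, it is wrapped in an LSAN-aware variant when building with
// LeakSanitizer.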
v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
#if defined(LEAK_SANITIZER)
  static base::LeakyObject<base::LsanVirtualAddressSpace> vas(
      std::make_unique<base::VirtualAddressSpace>());
#else
  static base::LeakyObject<base::VirtualAddressSpace> vas;
#endif
  return vas.get();
}

#ifdef V8_SANDBOX
v8::PageAllocator* GetSandboxPageAllocator() {
  // TODO(chromium:1218005) remove this code once the cage is no longer
  // optional.
  if (GetProcessWideSandbox()->is_disabled()) {
    return GetPlatformPageAllocator();
  } else {
    CHECK(GetProcessWideSandbox()->is_initialized());
    return GetProcessWideSandbox()->page_allocator();
  }
}
#endif

v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

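// Malloced gives heap-allocated helper classes an operator new that retries
// under memory pressure and treats a final failure as a fatal OOM instead of
// returning nullptr.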
void* Malloced::operator new(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::operator delete(void* p) { base::Free(p); }

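// StrDup/StrNDup copy a C string into a NewArray<char> buffer. The caller
// owns the result and is expected to release it with DeleteArray (see
// allocation.h) rather than free.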
char* StrDup(const char* str) {
  size_t length = strlen(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, size_t n) {
  size_t length = strlen(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

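// Calls malloc_fn(size) up to kAllocationTries times, signalling critical
// memory pressure to the embedder between attempts, and returns nullptr only
// if every attempt fails. Illustrative use, relying on the default malloc_fn
// declared in allocation.h:
//
//   void* block = AllocWithRetry(4096);
//   if (block == nullptr) { /* handle allocation failure */ }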
void* AllocWithRetry(size_t size, MallocFn malloc_fn) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc_fn(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

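// Like AllocWithRetry, but for aligned allocations: retries under memory
// pressure and crashes with a fatal OOM instead of returning nullptr. The
// result must be released with AlignedFree below.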
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  base::Free(ptr);
#elif V8_OS_STARBOARD
  SbMemoryFreeAligned(ptr);
#else
  base::Free(ptr);
#endif
}

size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

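// Reserves |size| bytes of address space from |page_allocator| with the given
// alignment and access permissions. The hint is replaced by a random address
// when FLAG_randomize_all_allocations is set, and the request is retried
// under memory pressure like the malloc-based paths above. Returns nullptr on
// failure.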
void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
                    size_t alignment, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(hint, AlignedAddress(hint, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  if (FLAG_randomize_all_allocations) {
    hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
  }
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(hint, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

void FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  CHECK(page_allocator->FreePages(address, size));
}

void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  CHECK(page_allocator->ReleasePages(address, size, new_size));
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

VirtualMemory::VirtualMemory() = default;

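// Reserves an address-space region of |size| bytes (rounded up to the
// allocation page size) with kNoAccess permissions, or kNoAccessWillJitLater
// when the region is meant to hold JIT code. On failure the object is left
// unreserved; callers check IsReserved().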
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment, JitPermission jit)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  PageAllocator::Permission permissions =
      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
                            : PageAllocator::kNoAccess;
  Address address = reinterpret_cast<Address>(AllocatePages(
      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

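// Returns the tail of the region starting at |free_start| to the OS and
// shrinks this reservation accordingly, returning the number of bytes
// released.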
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
               old_size, region_.size());
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave the size at only commit granularity.
  // Align it here.
  FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
            RoundUp(region.size(), page_allocator->AllocatePageSize()));
}

void VirtualMemory::FreeReadOnly() {
  DCHECK(IsReserved());
  // The only difference to Free is that this does not call Reset, which would
  // write to the VirtualMemory object.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;

  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave the size at only commit granularity.
  // Align it here.
  FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
            RoundUp(region.size(), page_allocator->AllocatePageSize()));
}

VirtualMemoryCage::VirtualMemoryCage() = default;

VirtualMemoryCage::~VirtualMemoryCage() { Free(); }

VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
  *this = std::move(other);
}

VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
    V8_NOEXCEPT {
  page_allocator_ = std::move(other.page_allocator_);
  reservation_ = std::move(other.reservation_);
  return *this;
}

namespace {
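// Computes the cage base for a given reservation start: the smallest address
// >= reservation_start whose biased value (base + base_bias_size) satisfies
// base_alignment. With base_bias_size == 0 this is simply
// RoundUp(reservation_start, base_alignment).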
inline Address VirtualMemoryCageStart(
    Address reservation_start,
    const VirtualMemoryCage::ReservationParams& params) {
  return RoundUp(reservation_start + params.base_bias_size,
                 params.base_alignment) -
         params.base_bias_size;
}
}  // namespace

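// Reserves the cage's address space. Three cases are handled:
//  1. An existing reservation is adopted as-is; it must already have the
//     requested size and base alignment.
//  2. When no particular base alignment is required, or the base bias is
//     zero, a single reservation attempt suffices and can fail only due to
//     OOM.
//  3. Otherwise the space is overreserved at twice the requested size so that
//     a correctly aligned sub-region can be carved out of it, retrying a
//     bounded number of times.
// On success, page_allocator_ is a BoundedPageAllocator covering the
// allocatable part of the cage.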
bool VirtualMemoryCage::InitReservation(
    const ReservationParams& params, base::AddressRegion existing_reservation) {
  DCHECK(!reservation_.IsReserved());

  const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
  CHECK(IsAligned(params.reservation_size, allocate_page_size));
  CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
        (IsAligned(params.base_alignment, allocate_page_size) &&
         IsAligned(params.base_bias_size, allocate_page_size)));
  CHECK_LE(params.base_bias_size, params.reservation_size);

  if (!existing_reservation.is_empty()) {
    CHECK_EQ(existing_reservation.size(), params.reservation_size);
    CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
          IsAligned(existing_reservation.begin(), params.base_alignment));
    reservation_ =
        VirtualMemory(params.page_allocator, existing_reservation.begin(),
                      existing_reservation.size());
    base_ = reservation_.address() + params.base_bias_size;
  } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment ||
             params.base_bias_size == 0) {
    // When the base doesn't need to be aligned or when the requested
    // base_bias_size is zero, the virtual memory reservation can fail only
    // due to OOM.
    Address hint =
        RoundDown(params.requested_start_hint,
                  RoundUp(params.base_alignment, allocate_page_size));
    VirtualMemory reservation(params.page_allocator, params.reservation_size,
                              reinterpret_cast<void*>(hint),
                              params.base_alignment);
    if (!reservation.IsReserved()) return false;

    reservation_ = std::move(reservation);
    base_ = reservation_.address() + params.base_bias_size;
    CHECK_EQ(reservation_.size(), params.reservation_size);
  } else {
    // Otherwise, we need to try harder by first overreserving in hopes of
    // finding a correctly aligned address within the larger reservation.
    Address hint =
        RoundDown(params.requested_start_hint,
                  RoundUp(params.base_alignment, allocate_page_size)) -
        RoundUp(params.base_bias_size, allocate_page_size);
    const int kMaxAttempts = 4;
    for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
      // Reserve a region of twice the size so that there is an aligned address
      // within it that's usable as the cage base.
      VirtualMemory padded_reservation(params.page_allocator,
                                       params.reservation_size * 2,
                                       reinterpret_cast<void*>(hint));
      if (!padded_reservation.IsReserved()) return false;

      // Find a properly aligned sub-region inside the reservation.
      Address address =
          VirtualMemoryCageStart(padded_reservation.address(), params);
      CHECK(padded_reservation.InVM(address, params.reservation_size));

#if defined(V8_OS_FUCHSIA)
      // Fuchsia does not respect the given hints, so as a workaround we use
      // the overreserved address space region instead of trying to re-reserve
      // a sub-region.
      bool overreserve = true;
#else
      // For the last attempt, use the overreserved region to avoid an OOM
      // crash. This case can happen if many isolates are created in parallel
      // and race to reserve the regions.
      bool overreserve = (attempt == kMaxAttempts - 1);
#endif

      if (overreserve) {
        if (padded_reservation.InVM(address, params.reservation_size)) {
          reservation_ = std::move(padded_reservation);
          base_ = address + params.base_bias_size;
          break;
        }
      } else {
        // Now free the padded reservation and immediately try to reserve an
        // exact region at the aligned address. We have to do this dance
        // because the reservation address requirement is more complex than
        // just a certain alignment, and not all operating systems support
        // freeing parts of reserved address space regions.
        padded_reservation.Free();

        VirtualMemory reservation(params.page_allocator,
                                  params.reservation_size,
                                  reinterpret_cast<void*>(address));
        if (!reservation.IsReserved()) return false;

        // The reservation could still be somewhere else, but we can accept it
        // if it has the required alignment.
        Address start_address =
            VirtualMemoryCageStart(reservation.address(), params);
        if (reservation.address() == start_address) {
          reservation_ = std::move(reservation);
          base_ = start_address + params.base_bias_size;
          CHECK_EQ(reservation_.size(), params.reservation_size);
          break;
        }
      }
    }
  }
  CHECK_NE(base_, kNullAddress);
  CHECK(IsAligned(base_, params.base_alignment));

  const Address allocatable_base = RoundUp(base_, params.page_size);
  const size_t allocatable_size =
      RoundDown(params.reservation_size - (allocatable_base - base_) -
                    params.base_bias_size,
                params.page_size);
  size_ = allocatable_base + allocatable_size - base_;
  page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
      params.page_allocator, allocatable_base, allocatable_size,
      params.page_size,
      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
  return true;
}

void VirtualMemoryCage::Free() {
  if (IsReserved()) {
    base_ = kNullAddress;
    size_ = 0;
    page_allocator_.reset();
    reservation_.Free();
  }
}

}  // namespace internal
}  // namespace v8