// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/utils/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/sanitizer/lsan-page-allocator.h"
#include "src/utils/memcopy.h"
#include "src/utils/vector.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

namespace {

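// Allocates |size| bytes aligned to |alignment| using the platform's native
// aligned-allocation primitive: _aligned_malloc on Windows, memalign on
// Bionic, and posix_memalign elsewhere. Returns nullptr on failure.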
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

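// Holds the process-wide page allocator: the embedder's, when the platform
// provides one, otherwise a leaky default base::PageAllocator. Under
// LeakSanitizer the chosen allocator is additionally wrapped in an
// LsanPageAllocator so page reservations remain visible to LSan.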
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
  return GetPageTableInitializer()->page_allocator();
}

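// Installs |new_page_allocator| for the rest of the process and returns the
// previously active allocator so tests can restore it afterwards.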
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

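// Objects deriving from Malloced live on the malloc heap; operator new
// retries under memory pressure via AllocWithRetry and treats a still-failing
// allocation as a fatal out-of-memory condition.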
void* Malloced::operator new(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::operator delete(void* p) { free(p); }

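// StrDup and StrNDup return NUL-terminated heap copies of |str| (StrNDup
// copies at most |n| characters), allocated via NewArray<char>. The caller
// owns the result and is expected to release it with the matching
// DeleteArray from allocation.h.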
char* StrDup(const char* str) {
  size_t length = strlen(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, size_t n) {
  size_t length = strlen(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

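// malloc with a memory-pressure fallback: on failure, notifies the embedder
// via OnCriticalMemoryPressure and retries, up to kAllocationTries attempts
// in total. Unlike Malloced::operator new, a nullptr result is returned to
// the caller rather than treated as fatal.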
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

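// Aligned allocation with the same retry-under-pressure policy as
// AllocWithRetry, except that failure here is fatal. |alignment| must be a
// power of two no smaller than alignof(void*). The result must be released
// with AlignedFree below.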
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  free(ptr);
#endif
}

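// Thin wrappers forwarding page-size queries and mmap-address randomization
// to the platform page allocator.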
size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

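// Reserves |size| bytes of pages with the given |access| permissions, aligned
// to |alignment|, near |hint| (or at a random address when
// FLAG_randomize_all_allocations is set). Uses the same retry-under-pressure
// loop as the malloc-based helpers; the pressure notification reports the
// worst-case request size including alignment slack.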
void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
                    size_t alignment, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(hint, AlignedAddress(hint, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  if (FLAG_randomize_all_allocations) {
    hint = page_allocator->GetRandomMmapAddr();
  }
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(hint, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

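// FreePages, ReleasePages, and SetPermissions forward to the given page
// allocator after validating alignment preconditions: FreePages expects the
// full reservation size, while ReleasePages only shrinks a reservation and
// requires the new size to be commit-page aligned.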
bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  return page_allocator->FreePages(address, size);
}

bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

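// Notifies the embedder of critical memory pressure, preferring the
// length-aware overload when the embedder handles it. Always returns true,
// which is what keeps the retry loops above going for another attempt.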
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

VirtualMemory::VirtualMemory() = default;

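// Reserves |size| bytes of virtual address space (rounded up to the
// allocation page size) without committing it: pages are mapped kNoAccess,
// or kNoAccessWillJitLater when the region will later hold JIT code. On
// failure the region stays empty and IsReserved() reports false.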
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment, JitPermission jit)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  PageAllocator::Permission permissions =
      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
                            : PageAllocator::kNoAccess;
  Address address = reinterpret_cast<Address>(AllocatePages(
      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

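// Gives the tail of the reservation, from |free_start| to the current end,
// back to the OS and shrinks region_ accordingly. Returns the number of
// bytes released.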
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave the size at only commit granularity.
  // Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

void VirtualMemory::FreeReadOnly() {
  DCHECK(IsReserved());
  // The only difference from Free is that this doesn't call Reset, which
  // would write to the VirtualMemory object.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;

  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave the size at only commit granularity.
  // Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

}  // namespace internal
}  // namespace v8