// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif

namespace v8 {
namespace internal {

namespace {

void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}
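
// Whichever path produced the pointer above also determines how it must be
// released: _aligned_malloc pairs with _aligned_free, while memalign and
// posix_memalign results are released with plain free. AlignedFree below
// mirrors the same #if branching for exactly this reason.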

// TODO(bbudge) Simplify this once all embedders implement a page allocator.
struct InitializePageAllocator {
  static void Construct(void* page_allocator_ptr_arg) {
    auto page_allocator_ptr =
        reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
    v8::PageAllocator* page_allocator =
        V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator == nullptr) {
      static v8::base::PageAllocator default_allocator;
      page_allocator = &default_allocator;
    }
    *page_allocator_ptr = page_allocator;
  }
};

static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
    page_allocator = LAZY_INSTANCE_INITIALIZER;

v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

void* Malloced::New(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::Delete(void* p) {
  free(p);
}

char* StrDup(const char* str) {
  int length = StrLength(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, int n) {
  int length = StrLength(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}
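
// AllocWithRetry deliberately does not treat failure as fatal: it gives the
// embedder a chance to release memory via OnCriticalMemoryPressure between
// attempts and returns nullptr if every attempt fails. Callers are expected
// to check the result, as Malloced::New above does, e.g. (illustrative):
//
//   void* buffer = AllocWithRetry(byte_count);
//   if (buffer == nullptr) { /* handle or report OOM */ }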

void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(V8_ALIGNOF(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}
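
// AlignedAlloc, by contrast, never returns nullptr; it aborts with a fatal
// OOM instead. The alignment must be a power of two no smaller than the
// alignment of void* (see the DCHECKs above), and successful allocations
// must be released with AlignedFree. A minimal usage sketch:
//
//   void* block = AlignedAlloc(block_size, 16);  // 16-byte aligned
//   ...
//   AlignedFree(block);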

void AlignedFree(void *ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  free(ptr);
#endif
}

size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }

size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }

void* AllocatePages(void* address, size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result =
        GetPageAllocator()->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
#if defined(LEAK_SANITIZER)
  if (result != nullptr) {
    __lsan_register_root_region(result, size);
  }
#endif
  return result;
}
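
// AllocatePages requires |size| to be a multiple of the allocation page size
// and the |address| hint to be |alignment|-aligned (see the DCHECKs above).
// The usual pattern, followed by VirtualMemory below, is to reserve a region
// with PageAllocator::kNoAccess and commit parts of it later through
// SetPermissions.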

bool FreePages(void* address, const size_t size) {
  DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
  bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
  if (result) {
    __lsan_unregister_root_region(address, size);
  }
#endif
  return result;
}

bool ReleasePages(void* address, size_t size, size_t new_size) {
  DCHECK_LT(new_size, size);
  bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
#if defined(LEAK_SANITIZER)
  if (result) {
    __lsan_unregister_root_region(address, size);
    __lsan_register_root_region(address, new_size);
  }
#endif
  return result;
}

bool SetPermissions(void* address, size_t size,
                    PageAllocator::Permission access) {
  return GetPageAllocator()->SetPermissions(address, size, access);
}

byte* AllocatePage(void* address, size_t* allocated) {
  size_t page_size = AllocatePageSize();
  void* result =
      AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
  if (result != nullptr) *allocated = page_size;
  return static_cast<byte*>(result);
}

bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}
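
// Note that this helper always returns true, so the retry loops above perform
// the full kAllocationTries attempts whenever the first allocation fails.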

VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {}

VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
    : address_(kNullAddress), size_(0) {
  size_t page_size = AllocatePageSize();
  size_t alloc_size = RoundUp(size, page_size);
  address_ = reinterpret_cast<Address>(
      AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
  if (address_ != kNullAddress) {
    size_ = alloc_size;
  }
}
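
// A freshly constructed VirtualMemory reservation is inaccessible
// (kNoAccess). A typical caller commits it before use, roughly (sketch,
// assuming the address()/size() accessors declared in allocation.h):
//
//   VirtualMemory vm(reservation_size, GetRandomMmapAddr());
//   if (vm.IsReserved()) {
//     CHECK(vm.SetPermissions(vm.address(), vm.size(),
//                             PageAllocator::kReadWrite));
//   }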

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  address_ = kNullAddress;
  size_ = 0;
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result = v8::internal::SetPermissions(address, size, access);
  DCHECK(result);
  return result;
}

size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAddressAligned(free_start, CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  const size_t free_size = size_ - (free_start - address_);
  CHECK(InVM(free_start, free_size));
  DCHECK_LT(address_, free_start);
  DCHECK_LT(free_start, address_ + size_);
  CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
                     size_ - free_size));
  size_ -= free_size;
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  Address address = address_;
  size_t size = size_;
  CHECK(InVM(address, size));
  Reset();
  // FreePages expects size to be aligned to allocation granularity. Trimming
  // may leave size at only commit granularity. Align it here.
  CHECK(FreePages(reinterpret_cast<void*>(address),
                  RoundUp(size, AllocatePageSize())));
}

void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  address_ = from->address_;
  size_ = from->size_;
  from->Reset();
}

bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
  VirtualMemory vm(size, hint);
  if (vm.IsReserved()) {
    result->TakeControl(&vm);
    return true;
  }
  return false;
}

bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
                               VirtualMemory* result) {
  VirtualMemory vm(size, hint, alignment);
  if (vm.IsReserved()) {
    result->TakeControl(&vm);
    return true;
  }
  return false;
}
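
// Both helpers leave |result| untouched when the reservation fails, so a
// caller can fall back or report the failure, e.g. (illustrative sketch):
//
//   VirtualMemory reservation;
//   if (!AllocVirtualMemory(reservation_size, GetRandomMmapAddr(),
//                           &reservation)) {
//     // reservation.IsReserved() is still false; report or retry elsewhere.
//   }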

}  // namespace internal
}  // namespace v8