// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_UTILS_ALLOCATION_H_
#define V8_UTILS_ALLOCATION_H_

#include "include/v8-platform.h"
#include "src/base/address-region.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/init/v8.h"

namespace v8 {

namespace base {
class BoundedPageAllocator;
}  // namespace base

namespace internal {

class Isolate;

// This file defines memory allocation functions. If a first attempt at an
// allocation fails, these functions call back into the embedder, then attempt
// the allocation a second time. The embedder callback must not reenter V8.

// Called when allocation routines fail to allocate, even with a possible retry.
// This function should not return, but should terminate the current processing.
[[noreturn]] V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(
    Isolate* isolate, const char* message);

// Superclass for classes managed with new & delete.
class V8_EXPORT_PRIVATE Malloced {
 public:
  static void* operator new(size_t size);
  static void operator delete(void* p);
};

template <typename T>
T* NewArray(size_t size) {
  T* result = new (std::nothrow) T[size];
  if (result == nullptr) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
    result = new (std::nothrow) T[size];
    if (result == nullptr) FatalProcessOutOfMemory(nullptr, "NewArray");
  }
  return result;
}

template <typename T, typename = typename std::enable_if<
                          base::is_trivially_copyable<T>::value>::type>
T* NewArray(size_t size, T default_val) {
  T* result = reinterpret_cast<T*>(NewArray<uint8_t>(sizeof(T) * size));
  for (size_t i = 0; i < size; ++i) result[i] = default_val;
  return result;
}

template <typename T>
void DeleteArray(T* array) {
  delete[] array;
}

template <typename T>
struct ArrayDeleter {
  void operator()(T* array) { DeleteArray(array); }
};

template <typename T>
using ArrayUniquePtr = std::unique_ptr<T, ArrayDeleter<T>>;
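
// A minimal usage sketch (illustrative only; |n| is a placeholder):
//
//   ArrayUniquePtr<int> buf(NewArray<int>(n));
//   buf.get()[0] = 42;
//   // DeleteArray() runs automatically when |buf| goes out of scope.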

// The normal strdup functions use malloc. These versions of StrDup and
// StrNDup use new and call the FatalProcessOutOfMemory handler if the
// allocation fails.
V8_EXPORT_PRIVATE char* StrDup(const char* str);
char* StrNDup(const char* str, int n);

// Allocation policy for allocating in the C free store using malloc
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
 public:
  template <typename T, typename TypeTag = T[]>
  V8_INLINE T* NewArray(size_t length) {
    return static_cast<T*>(Malloced::operator new(length * sizeof(T)));
  }
  template <typename T, typename TypeTag = T[]>
  V8_INLINE void DeleteArray(T* p, size_t length) {
    Malloced::operator delete(p);
  }
};

using MallocFn = void* (*)(size_t);

// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
// Call free to release memory allocated with this function.
void* AllocWithRetry(size_t size, MallocFn = base::Malloc);
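
// A minimal usage sketch (illustrative only; pairing with base::Free assumes
// the default base::Malloc allocator):
//
//   void* data = AllocWithRetry(size);  // |size| is a placeholder.
//   if (data == nullptr) return;        // Failure is reported via nullptr.
//   ...
//   base::Free(data);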

V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
V8_EXPORT_PRIVATE void AlignedFree(void* ptr);

// Returns the platform page allocator instance. Guaranteed to be a valid
// pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();

// Returns the platform virtual memory space instance. Guaranteed to be a valid
// pointer.
V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();

#ifdef V8_SANDBOX
// Returns the page allocator instance for allocating pages inside the sandbox.
// Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetSandboxPageAllocator();
#endif

// Returns the appropriate page allocator to use for ArrayBuffer backing
// stores. If the sandbox is enabled, these must be allocated inside the
// sandbox and so this will be the SandboxPageAllocator. Otherwise it will be
// the PlatformPageAllocator.
inline v8::PageAllocator* GetArrayBufferPageAllocator() {
#ifdef V8_SANDBOX
  return GetSandboxPageAllocator();
#else
  return GetPlatformPageAllocator();
#endif
}

// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
// the tests do not modify the value simultaneously.
V8_EXPORT_PRIVATE v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* page_allocator);

// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();

// Gets the granularity at which the permissions and release calls can be made.
V8_EXPORT_PRIVATE size_t CommitPageSize();

// Sets the random seed so that GetRandomMmapAddr() will generate repeatable
// sequences of random mmap addresses.
V8_EXPORT_PRIVATE void SetRandomMmapSeed(int64_t seed);

// Generates a random address to be used for hinting allocation calls.
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

// Allocates memory. Permissions are set according to the access argument.
// |address| is a hint. |size| and |alignment| must be multiples of
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
                                          void* address, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access);

// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize().
V8_EXPORT_PRIVATE
void FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size);

// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
// Released memory is left in an undefined state, so it should not be accessed.
V8_EXPORT_PRIVATE
void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size);

// Sets permissions according to |access|. |address| and |size| must be
// multiples of CommitPageSize(). Setting permission to kNoAccess may
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
                                          void* address, size_t size,
                                          PageAllocator::Permission access);
inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
                           size_t size, PageAllocator::Permission access) {
  return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
                        access);
}
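
// A minimal allocate/protect/free sketch (illustrative only):
//
//   v8::PageAllocator* allocator = GetPlatformPageAllocator();
//   size_t size = AllocatePageSize();
//   void* mem = AllocatePages(allocator, GetRandomMmapAddr(), size, size,
//                             PageAllocator::kReadWrite);
//   if (mem != nullptr) {
//     ...  // Use the memory, e.g. drop write access once initialized:
//     CHECK(SetPermissions(allocator, mem, size, PageAllocator::kRead));
//     FreePages(allocator, mem, size);
//   }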

// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
// could be released, false otherwise.
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);

// Represents and controls an area of reserved memory.
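//
// A minimal usage sketch (illustrative only; |size| must be a multiple of
// the allocator's commit page size):
//
//   VirtualMemory vm(GetPlatformPageAllocator(), size, /*hint=*/nullptr);
//   if (vm.IsReserved()) {
//     CHECK(vm.SetPermissions(vm.address(), size, PageAllocator::kReadWrite));
//     ...  // Use the memory; the reservation is released in ~VirtualMemory().
//   }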
class VirtualMemory final {
 public:
  enum JitPermission { kNoJit, kMapAsJittable };

  // Empty VirtualMemory object, controlling no reserved memory.
  V8_EXPORT_PRIVATE VirtualMemory();

  VirtualMemory(const VirtualMemory&) = delete;
  VirtualMemory& operator=(const VirtualMemory&) = delete;

  // Reserves virtual memory containing an area of the given size that is
  // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
  // size. The |size| must be aligned with |page_allocator|'s commit page size.
  // This may not be at the position returned by address().
  V8_EXPORT_PRIVATE VirtualMemory(v8::PageAllocator* page_allocator,
                                  size_t size, void* hint, size_t alignment = 1,
                                  JitPermission jit = kNoJit);

  // Construct a virtual memory by assigning it some already mapped address
  // and size.
  VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
      : page_allocator_(page_allocator), region_(address, size) {
    DCHECK_NOT_NULL(page_allocator);
    DCHECK(IsAligned(address, page_allocator->AllocatePageSize()));
    DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
  }

  // Releases the reserved memory, if any, controlled by this VirtualMemory
  // object.
  V8_EXPORT_PRIVATE ~VirtualMemory();

  // Move constructor.
  VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { *this = std::move(other); }

  // Move assignment operator.
  VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
    DCHECK(!IsReserved());
    page_allocator_ = other.page_allocator_;
    region_ = other.region_;
    other.Reset();
    return *this;
  }

  // Returns whether the memory has been reserved.
  bool IsReserved() const { return region_.begin() != kNullAddress; }

  // Initializes or resets an embedded VirtualMemory object.
  V8_EXPORT_PRIVATE void Reset();

  v8::PageAllocator* page_allocator() { return page_allocator_; }

  const base::AddressRegion& region() const { return region_; }

  // Returns the start address of the reserved memory.
  // If the memory was reserved with an alignment, this address is not
  // necessarily aligned. The user might need to round it up to a multiple of
  // the alignment to get the start of the aligned block.
  Address address() const {
    DCHECK(IsReserved());
    return region_.begin();
  }

  Address end() const {
    DCHECK(IsReserved());
    return region_.end();
  }

  // Returns the size of the reserved memory. The returned value is only
  // meaningful when IsReserved() returns true.
  // If the memory was reserved with an alignment, this size may be larger
  // than the requested size.
  size_t size() const { return region_.size(); }

  // Sets permissions according to the access argument. |address| and |size|
  // must be multiples of CommitPageSize(). Returns true on success, otherwise
  // false.
  V8_EXPORT_PRIVATE bool SetPermissions(Address address, size_t size,
                                        PageAllocator::Permission access);

  // Releases memory after |free_start|. Returns the number of bytes released.
  V8_EXPORT_PRIVATE size_t Release(Address free_start);

  // Frees all memory.
  V8_EXPORT_PRIVATE void Free();

  // As with Free but does not write to the VirtualMemory object itself so it
  // can be called on a VirtualMemory that is itself not writable.
  V8_EXPORT_PRIVATE void FreeReadOnly();

  bool InVM(Address address, size_t size) const {
    return region_.contains(address, size);
  }

 private:
  // Page allocator that controls the virtual memory.
  v8::PageAllocator* page_allocator_ = nullptr;
  base::AddressRegion region_;
};

// Represents a VirtualMemory reservation along with a BoundedPageAllocator that
// can be used to allocate within the reservation.
//
// Virtual memory cages are used for both the pointer compression cage and code
// ranges (on platforms that require code ranges) and are configurable via
// ReservationParams.
//
// +------------+-----------+------------ ~~~ --+- ~~~ -+
// |     ...    |    ...    |        ...        |  ...  |
// +------------+-----------+------------ ~~~ --+- ~~~ -+
// ^            ^           ^
// start        cage base   allocatable base
//
// <------------>           <------------------->
//   base bias size            allocatable size
//              <--------------------------------->
//                        cage size
// <---------------------------------------------------->
//                   reservation size
//
// - The reservation is made using ReservationParams::page_allocator.
// - start is the start of the virtual memory reservation.
// - cage base is the base address of the cage.
// - allocatable base is the cage base rounded up to the nearest
//   ReservationParams::page_size, and is the start of the allocatable area for
//   the BoundedPageAllocator.
// - cage size is the size of the area from cage base to the end of the
//   allocatable area.
//
// - The base bias is configured by ReservationParams::base_bias_size.
// - The reservation size is configured by ReservationParams::reservation_size
//   but it may actually be bigger if we end up over-reserving the virtual
//   address space.
//
// Additionally,
// - The alignment of the cage base is configured by
//   ReservationParams::base_alignment.
// - The page size of the BoundedPageAllocator is configured by
//   ReservationParams::page_size.
// - A hint for the value of start can be passed by
//   ReservationParams::requested_start_hint.
//
// The configuration is subject to the following alignment requirements.
// Below, AllocatePageSize is short for
// ReservationParams::page_allocator->AllocatePageSize().
//
// - The reservation size must be AllocatePageSize-aligned.
// - If the base alignment is not kAnyBaseAlignment, both the base alignment
//   and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
//   acceptable. In this case the base bias size does not need to be aligned.
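//
// A minimal setup sketch (illustrative only; the reservation size below is a
// placeholder that must satisfy the alignment requirements above):
//
//   VirtualMemoryCage cage;
//   VirtualMemoryCage::ReservationParams params;
//   params.page_allocator = GetPlatformPageAllocator();
//   params.reservation_size = reservation_size;
//   params.base_alignment =
//       VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
//   params.base_bias_size = 0;
//   params.page_size = CommitPageSize();
//   params.requested_start_hint = kNullAddress;
//   if (cage.InitReservation(params)) {
//     // Allocate pages within the cage via cage.page_allocator().
//   }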
class VirtualMemoryCage {
 public:
  VirtualMemoryCage();
  virtual ~VirtualMemoryCage();

  VirtualMemoryCage(const VirtualMemoryCage&) = delete;
  VirtualMemoryCage& operator=(const VirtualMemoryCage&) = delete;

  VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT;
  VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;

  Address base() const { return base_; }
  size_t size() const { return size_; }

  base::BoundedPageAllocator* page_allocator() const {
    return page_allocator_.get();
  }

  VirtualMemory* reservation() { return &reservation_; }
  const VirtualMemory* reservation() const { return &reservation_; }

  bool IsReserved() const {
    DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
    DCHECK_EQ(base_ != kNullAddress, size_ != 0);
    return reservation_.IsReserved();
  }

  struct ReservationParams {
    // The allocator to use to reserve the virtual memory.
    v8::PageAllocator* page_allocator;
    // See diagram above.
    size_t reservation_size;
    size_t base_alignment;
    size_t base_bias_size;
    size_t page_size;
    Address requested_start_hint;

    static constexpr size_t kAnyBaseAlignment = 1;
  };

  // A number of attempts is made to try to reserve a region that satisfies the
  // constraints in params, but this may fail. The base address may be different
  // than the one requested.
  // If an existing reservation is provided, it will be used for this cage
  // instead. The caller retains ownership of the reservation and is responsible
  // for keeping the memory reserved during the lifetime of this object.
  bool InitReservation(
      const ReservationParams& params,
      base::AddressRegion existing_reservation = base::AddressRegion());

  void Free();

 protected:
  Address base_ = kNullAddress;
  size_t size_ = 0;
  std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
  VirtualMemory reservation_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_UTILS_ALLOCATION_H_