// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
#define V8_HEAP_MEMORY_ALLOCATOR_H_

#include <atomic>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;
class ReadOnlyPage;

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
 public:
  // Returns the most recently freed code range start address for the given
  // size. If there is no such entry, then a random address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  base::Mutex mutex_;
  // A map from code range size to an array of recently freed code range
  // addresses. There should be O(1) different code range sizes.
  // The length of each array is limited by the peak number of code ranges,
  // which should also be O(1).
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
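
// Illustrative usage sketch (assumed call pattern, not part of the V8 API
// contract; |hint| stands for the process-wide instance, however it is
// obtained): a caller that sets up a code range consults the hint first and
// reports the region back when it is released, so a later reservation of the
// same size can reuse the just-freed region, e.g.
//
//   Address start = hint.GetAddressHint(code_range_size);
//   // ... reserve a code range of |code_range_size| at |start|, use it ...
//   hint.NotifyFreedCodeRange(start, code_range_size);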

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
 public:
  // Unmapper takes care of concurrently unmapping and uncommitting memory
  // chunks.
  class Unmapper {
   public:
    class UnmapFreeMemoryJob;

    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap), allocator_(allocator) {
      chunks_[kRegular].reserve(kReservedQueueingSlots);
      chunks_[kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
        AddMemoryChunkSafe<kRegular>(chunk);
      } else {
        AddMemoryChunkSafe<kNonRegular>(chunk);
      }
    }

    MemoryChunk* TryGetPooledMemoryChunkSafe() {
      // Procedure:
      // (1) Try to get a chunk that was declared as pooled and has already
      // been uncommitted.
      // (2) Try to steal any memory chunk of kPageSize that would've been
      // unmapped.
      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe<kRegular>();
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllAllocatedMemory();
        }
      }
      return chunk;
    }

    V8_EXPORT_PRIVATE void FreeQueuedChunks();
    void CancelAndWaitForPendingTasks();
    void PrepareForGC();
    V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
    V8_EXPORT_PRIVATE void TearDown();
    size_t NumberOfCommittedChunks();
    V8_EXPORT_PRIVATE int NumberOfChunks();
    size_t CommittedBufferedMemory();

   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                    // can thus be used for stealing.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      kUncommitPooled,
      kReleasePooled,
    };

    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::MutexGuard guard(&mutex_);
      chunks_[type].push_back(chunk);
    }

    template <ChunkQueueType type>
    MemoryChunk* GetMemoryChunkSafe() {
      base::MutexGuard guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    bool MakeRoomForNewTasks();

    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);

    void PerformFreeMemoryOnQueuedNonRegularChunks(
        JobDelegate* delegate = nullptr);

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    std::unique_ptr<v8::JobHandle> job_handle_;

    friend class MemoryAllocator;
  };
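
  // Illustrative sketch (assumed usage, not a definitive contract): the
  // owning MemoryAllocator queues chunks on the unmapper and drains the
  // queues later, either from a background job or synchronously, e.g.
  //
  //   unmapper()->AddMemoryChunkSafe(chunk);   // queue chunk for unmapping
  //   unmapper()->FreeQueuedChunks();          // schedule concurrent freeing
  //   unmapper()->EnsureUnmappingCompleted();  // or force completion now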

  enum AllocationMode {
    kRegular,
    kPooled,
  };

  enum FreeMode {
    kFull,
    kAlreadyPooled,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

  V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();

  // Computes the area of discardable memory within the given memory area
  // [addr, addr+size) and returns the result as a base::AddressRegion. If the
  // memory is not discardable, an empty region is returned.
  V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
      Address addr, size_t size);

  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
                                    size_t code_range_size);

  V8_EXPORT_PRIVATE void TearDown();

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
            typename SpaceType>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
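
  // Hypothetical call sites (sketch only; |allocator|, |paged_space| and
  // |semi_space| are illustrative names): pooled allocation is restricted to
  // pages of MemoryChunk::kPageSize, e.g.
  //
  //   Page* p1 = allocator->AllocatePage<kRegular>(size, paged_space,
  //                                                NOT_EXECUTABLE);
  //   Page* p2 = allocator->AllocatePage<kPooled>(MemoryChunk::kPageSize,
  //                                               semi_space, NOT_EXECUTABLE);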

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);

  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
      ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);

  template <MemoryAllocator::FreeMode mode = kFull>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Free(MemoryChunk* chunk);
  void FreeReadOnlyPage(ReadOnlyPage* chunk);

  // Returns allocated spaces in bytes.
  size_t Size() const { return size_; }

  // Returns allocated executable spaces in bytes.
  size_t SizeExecutable() const { return size_executable_; }

  // Returns the maximum available bytes of heaps.
  size_t Available() const {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns true if the address lies outside the address range ever allocated
  // by this MemoryAllocator. The check is conservative.
  V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

  // Returns a BasicMemoryChunk in which the memory region from
  // commit_area_size to reserve_area_size of the chunk area is reserved but
  // not committed; it can be committed later by calling
  // MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
      size_t reserve_area_size, size_t commit_area_size,
      Executability executable, BaseSpace* space);

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it can
  // be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               BaseSpace* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note
  // that internally memory is freed from |start_free| to the end of the
  // reservation. Additional memory beyond the page is not accounted for,
  // though, so |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }

  // Commits the memory region owned by the given reservation object. Returns
  // true if it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  // Uncommits the memory region owned by the given reservation object.
  // Returns true if it succeeded and false otherwise.
  bool UncommitMemory(VirtualMemory* reservation);

  // Zaps a contiguous block of memory [start..(start+size)[ with
  // a given zap value.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);

  // Page allocator instance for allocating non-executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }

  // Page allocator instance for allocating executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }

  // Returns the page allocator suitable for allocating pages with the
  // requested executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  // A region of memory that may contain executable code, including a reserved
  // OS page with read-write access at the beginning.
  const base::AddressRegion& code_range() const {
    // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    DCHECK_IMPLIES(!code_range_.is_empty(),
                   code_range_.contains(code_page_allocator_instance_->begin(),
                                        code_page_allocator_instance_->size()));
    return code_range_;
  }

  Unmapper* unmapper() { return &unmapper_; }

  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemory(MemoryChunk* chunk);
  void UnregisterMemory(BasicMemoryChunk* chunk,
                        Executability executable = NOT_EXECUTABLE);
  void UnregisterSharedMemory(BasicMemoryChunk* chunk);

  void RegisterReadOnlyMemory(ReadOnlyPage* page);
 private:
  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                   size_t requested);

  // PreFreeMemory logically frees the object, i.e., it unregisters the
  // memory, logs a delete event and adds the chunk to remembered unmapped
  // pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFree was executed
  // before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only
  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack whose page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
                              ptr, low, std::memory_order_acq_rel)) {
    }
    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
                               ptr, high, std::memory_order_acq_rel)) {
    }
  }

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    base::MutexGuard guard(&executable_memory_mutex_);
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    base::MutexGuard guard(&executable_memory_mutex_);
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }

  Isolate* isolate_;

  // This object controls virtual space reserved for code on the V8 heap. This
  // is only valid for 64-bit architectures where kRequiresCodeRange is true.
  VirtualMemory code_reservation_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by
  // v8::Platform or a BoundedPageAllocator (when pointer compression is
  // enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by
  // v8::Platform or a BoundedPageAllocator (when pointer compression is
  // enabled or on those 64-bit architectures where pc-relative 32-bit
  // displacement can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // A part of the |code_reservation_| that may contain executable code,
  // including a reserved page with read-write access at the beginning.
  // See details below.
  base::AddressRegion code_range_;

  // This unique pointer owns the instance of bounded code allocator
  // that controls executable pages allocation. It does not control the
  // optionally existing page in the beginning of the |code_range_|.
  // So, summarizing all of the above, the following conditions hold:
  // 1) |code_reservation_| >= |code_range_|
  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
  // 3) |code_reservation_| is AllocatePageSize()-aligned
  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
  // 5) |code_range_| is CommitPageSize()-aligned
  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  // Data structure to remember allocated executable memory chunks.
  std::unordered_set<MemoryChunk*> executable_memory_;
  base::Mutex executable_memory_mutex_;

  friend class heap::TestCodePageAllocatorScope;
  friend class heap::TestMemoryAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};

extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MEMORY_ALLOCATOR_H_