// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
#define V8_HEAP_MEMORY_ALLOCATOR_H_

#include <atomic>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-range.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;
class ReadOnlyPage;

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
 public:
  // Unmapper takes care of concurrently unmapping and uncommitting memory
  // chunks.
  class Unmapper {
   public:
    class UnmapFreeMemoryJob;

    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap), allocator_(allocator) {
      chunks_[ChunkQueueType::kRegular].reserve(kReservedQueueingSlots);
      chunks_[ChunkQueueType::kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
        AddMemoryChunkSafe(ChunkQueueType::kRegular, chunk);
      } else {
        AddMemoryChunkSafe(ChunkQueueType::kNonRegular, chunk);
      }
    }

    MemoryChunk* TryGetPooledMemoryChunkSafe() {
      // Procedure:
      // (1) Try to get a chunk that was declared as pooled and already has
      // been uncommitted.
      // (2) Try to steal any memory chunk of kPageSize that would've been
      // uncommitted.
      MemoryChunk* chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled);
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular);
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllAllocatedMemory();
        }
      }
      return chunk;
    }

    V8_EXPORT_PRIVATE void FreeQueuedChunks();
    void CancelAndWaitForPendingTasks();
    void PrepareForGC();
    V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
    V8_EXPORT_PRIVATE void TearDown();
    size_t NumberOfCommittedChunks();
    V8_EXPORT_PRIVATE int NumberOfChunks();
    size_t CommittedBufferedMemory();

   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                    // can thus be used for stealing.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already freed and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      // Disables any access on pooled pages before adding them to the pool.
      kUncommitPooled,

      // Free pooled pages. Only used on tear down and last-resort GCs.
      kFreePooled,
    };

    void AddMemoryChunkSafe(ChunkQueueType type, MemoryChunk* chunk) {
      base::MutexGuard guard(&mutex_);
      chunks_[type].push_back(chunk);
    }

    MemoryChunk* GetMemoryChunkSafe(ChunkQueueType type) {
      base::MutexGuard guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    bool MakeRoomForNewTasks();

    void PerformFreeMemoryOnQueuedChunks(FreeMode mode,
                                         JobDelegate* delegate = nullptr);

    void PerformFreeMemoryOnQueuedNonRegularChunks(
        JobDelegate* delegate = nullptr);

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[ChunkQueueType::kNumberOfChunkQueues];
    std::unique_ptr<v8::JobHandle> job_handle_;

    friend class MemoryAllocator;
  };

  enum class AllocationMode {
    // Regular allocation path. Does not use pool.
    kRegular,

    // Uses the pool for allocation first.
    kUsePool,
  };

  enum class FreeMode {
    // Frees page immediately on the main thread.
    kImmediately,

    // Frees page on background thread.
    kConcurrently,

    // Uncommits but does not free page on background thread. Page is added to
    // pool. Used to avoid the munmap/mmap-cycle when we quickly reallocate
    // pages.
    kConcurrentlyAndPool,
  };
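
  // Illustrative sketch (not part of this header; assumes a Space* |space|
  // and an allocator, e.g. the one owned by the Heap): how AllocationMode and
  // FreeMode are meant to pair up for a pooled page round-trip. Exact call
  // sites in V8 differ.
  //
  //   MemoryAllocator* allocator = ...;
  //   // Try to reuse an uncommitted page from the pool before mapping a new
  //   // one; pooled allocation only works for NOT_EXECUTABLE pages of size
  //   // MemoryChunk::kPageSize.
  //   Page* page = allocator->AllocatePage(
  //       MemoryAllocator::AllocationMode::kUsePool, space, NOT_EXECUTABLE);
  //   ...
  //   // Hand the page back without unmapping it, so a later kUsePool
  //   // allocation can skip the munmap/mmap cycle.
  //   allocator->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);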

  // Initializes the page size fields; called from V8::Initialize.
  static void InitializeOncePerProcess();

  V8_INLINE static intptr_t GetCommitPageSize() {
    DCHECK_LT(0, commit_page_size_);
    return commit_page_size_;
  }

  V8_INLINE static intptr_t GetCommitPageSizeBits() {
    DCHECK_LT(0, commit_page_size_bits_);
    return commit_page_size_bits_;
  }
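
  // For example (illustrative, not an API guarantee): with 4 KiB commit
  // pages, GetCommitPageSize() returns 4096 and GetCommitPageSizeBits()
  // returns 12, i.e. GetCommitPageSize() == 1 << GetCommitPageSizeBits().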

  // Computes the area of discardable memory within the given memory region
  // [addr, addr+size) and returns it as a base::AddressRegion. If the memory
  // is not discardable, an empty region is returned.
  V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
      Address addr, size_t size);
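  // For example, ComputeDiscardMemoryArea(0x20500, 0x3000) with a 4 KiB
  // commit page size can only discard commit pages fully contained in
  // [0x20500, 0x23500), i.e. roughly the region [0x21000, 0x23000). This is
  // an illustrative sketch only; the exact result also depends on
  // implementation details such as space reserved at the start of the region.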

  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate,
                                    v8::PageAllocator* code_page_allocator,
                                    size_t max_capacity);

  V8_EXPORT_PRIVATE void TearDown();

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  V8_EXPORT_PRIVATE Page* AllocatePage(
      MemoryAllocator::AllocationMode alloc_mode, Space* space,
      Executability executable);

  V8_EXPORT_PRIVATE LargePage* AllocateLargePage(LargeObjectSpace* space,
                                                 size_t object_size,
                                                 Executability executable);

  ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);

  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
      ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);

  V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode,
                              MemoryChunk* chunk);
  void FreeReadOnlyPage(ReadOnlyPage* chunk);

  // Returns allocated space in bytes.
  size_t Size() const { return size_; }

  // Returns allocated executable space in bytes.
  size_t SizeExecutable() const { return size_executable_; }

  // Returns the maximum number of bytes still available to the heap.
  size_t Available() const {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns whether the given address lies outside memory that has been
  // allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }
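
  // Illustrative use of IsOutsideAllocatedSpace (hypothetical caller code,
  // not part of this header): a cheap pre-filter before a precise lookup.
  //
  //   if (allocator->IsOutsideAllocatedSpace(addr)) return false;
  //   // The range check above is conservative; addresses inside the range
  //   // are not necessarily part of the heap, so a precise check follows.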

  // Partially releases |bytes_to_free| bytes starting at |start_free|. Note
  // that internally memory is freed from |start_free| to the end of the
  // reservation. Additional memory beyond the page is not accounted for,
  // though, so |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

#ifdef DEBUG
  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    base::MutexGuard guard(&executable_memory_mutex_);
    return executable_memory_.find(chunk) != executable_memory_.end();
  }
#endif  // DEBUG

  // Zaps a contiguous block of memory [start..(start+size)[ with
  // a given zap value.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  // Page allocator instance for allocating non-executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }

  // Page allocator instance for allocating executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }

  // Returns the page allocator suitable for allocating pages with the
  // requested executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  Unmapper* unmapper() { return &unmapper_; }

  void UnregisterReadOnlyPage(ReadOnlyPage* page);

  Address HandleAllocationFailure();

 private:
  // Used to store all data about MemoryChunk allocation, e.g. in
  // AllocateUninitializedChunk.
  struct MemoryChunkAllocationResult {
    void* start;
    size_t size;
    size_t area_start;
    size_t area_end;
    VirtualMemory reservation;
  };

  // Computes the size of a MemoryChunk from the size of the object area and
  // whether the chunk is executable or not.
  static size_t ComputeChunkSize(size_t area_size, Executability executable);

  // Internal allocation method for all pages/memory chunks. Returns data about
  // the uninitialized memory region.
  V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
  AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
                             Executability executable, PageSize page_size);

  // Internal raw allocation method that allocates an aligned MemoryChunk and
  // sets the right memory permissions.
  Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  // Commits the memory region owned by the given reservation object. Returns
  // true if it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  // Sets memory permissions on executable memory chunks. This entails the page
  // header (RW), the guard pages (no access) and the object area (code
  // modification permissions).
  V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(
      VirtualMemory* vm, Address start, size_t area_size, size_t reserved_size);
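  // Rough layout of an executable chunk as described above (illustrative
  // only; the exact offsets are defined elsewhere and may differ):
  //
  //   +--------------+------------+--------------------------+------------+
  //   | chunk header | guard page | object area (code)       | guard page |
  //   | RW           | no access  | code modification perms  | no access  |
  //   +--------------+------------+--------------------------+------------+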

  // Disallows any access on the memory region owned by the given reservation
  // object. Returns true if it succeeded and false otherwise.
  bool UncommitMemory(VirtualMemory* reservation);

  // Frees the given memory region.
  void FreeMemoryRegion(v8::PageAllocator* page_allocator, Address addr,
                        size_t size);

  // PreFreeMemory logically frees the object, i.e., it unregisters the
  // memory, logs a delete event and adds the chunk to remembered unmapped
  // pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFreeMemory was
  // executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for the public interface. Note that currently we only
  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  base::Optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
      Space* space);

  // Frees a pooled page. Only used on tear-down and last-resort GCs.
  void FreePooledChunk(MemoryChunk* chunk);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* space);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
                              ptr, low, std::memory_order_acq_rel)) {
    }
    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
                               ptr, high, std::memory_order_acq_rel)) {
    }
  }
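  // The loops above amount to an atomic min/max update: lowest_ever_allocated_
  // only ever decreases towards |low| and highest_ever_allocated_ only ever
  // increases towards |high|. compare_exchange_weak may fail spuriously or
  // because another thread won the race, in which case |ptr| is reloaded with
  // the current value and the update is retried until the stored bound is
  // already at least as tight as the requested one.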

  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemoryChunk(MemoryChunk* chunk);
  void UnregisterSharedBasicMemoryChunk(BasicMemoryChunk* chunk);
  void UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
                                  Executability executable = NOT_EXECUTABLE);

  void RegisterReadOnlyMemory(ReadOnlyPage* page);

#ifdef DEBUG
  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    base::MutexGuard guard(&executable_memory_mutex_);
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    base::MutexGuard guard(&executable_memory_mutex_);
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
  }
#endif  // DEBUG

  Isolate* isolate_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by
  // v8::Platform or a BoundedPageAllocator (when pointer compression is
  // enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator from Heap::code_range_ (when pointer compression
  // is enabled or on those 64-bit architectures where pc-relative 32-bit
  // displacement can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  base::Optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
  Unmapper unmapper_;

#ifdef DEBUG
  // Data structure to remember allocated executable memory chunks.
  // This data structure is used only in DCHECKs.
  std::unordered_set<MemoryChunk*> executable_memory_;
  base::Mutex executable_memory_mutex_;
#endif  // DEBUG

  V8_EXPORT_PRIVATE static size_t commit_page_size_;
  V8_EXPORT_PRIVATE static size_t commit_page_size_bits_;

  friend class heap::TestCodePageAllocatorScope;
  friend class heap::TestMemoryAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MEMORY_ALLOCATOR_H_