// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_

#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class Isolate;
class LocalHeap;

class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow typed slot offset in the old
  // to old remembered set. Note that this limit is higher than what assembler
  // already imposes on x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  LargePage(Heap* heap, BaseSpace* space, size_t chunk_size, Address area_start,
            Address area_end, VirtualMemory reservation,
            Executability executable);

  static LargePage* FromHeapObject(HeapObject o) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }

  LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
  const LargePage* next_page() const {
    return static_cast<const LargePage*>(list_node_.next());
  }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
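
// Illustrative sketch (not part of the original header): each LargePage backs
// exactly one object, so FromHeapObject() and GetObject() act as inverse
// mappings between a large HeapObject and its page. The variable names below
// are hypothetical.
//
//   HeapObject obj = ...;  // an object known to live in a large object space
//   LargePage* page = LargePage::FromHeapObject(obj);
//   DCHECK_EQ(page->GetObject(), obj);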

// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. Large objects do not move during garbage
// collections.

class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;
  using const_iterator = ConstLargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() const override;

  size_t Size() const override { return size_; }
  size_t SizeOfObjects() const override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

  int PageCount() const { return page_count_; }

  // Frees unmarked objects.
  virtual void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject obj) const;
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) const;

  // Checks whether the space is empty.
  bool IsEmpty() const { return first_page() == nullptr; }

  virtual void AddPage(LargePage* page, size_t object_size);
  virtual void RemovePage(LargePage* page, size_t object_size);

  LargePage* first_page() override {
    return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
  }
  const LargePage* first_page() const override {
    return reinterpret_cast<const LargePage*>(memory_chunk_list_.front());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  virtual bool is_off_thread() const { return false; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

  // The last allocated object that is not guaranteed to be initialized when
  // the concurrent marker visits it.
  Address pending_object() const {
    return pending_object_.load(std::memory_order_acquire);
  }

  void ResetPendingObject() {
    pending_object_.store(0, std::memory_order_release);
  }

  base::SharedMutex* pending_allocation_mutex() {
    return &pending_allocation_mutex_;
  }

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);

  LargePage* AllocateLargePage(int object_size, Executability executable);

  void UpdatePendingObject(HeapObject object);

  std::atomic<size_t> size_;          // allocated bytes
  int page_count_;                    // number of chunks
  std::atomic<size_t> objects_size_;  // size of objects
  base::Mutex allocation_mutex_;

  // Current potentially uninitialized object. Protected by
  // pending_allocation_mutex_.
  std::atomic<Address> pending_object_;

  // Used to protect pending_object_.
  base::SharedMutex pending_allocation_mutex_;

 private:
  friend class LargeObjectSpaceObjectIterator;
};
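
// Illustrative sketch (not part of the original header): the pages of a
// LargeObjectSpace form an intrusive list that can be walked with the
// begin()/end() iterators declared above; dereferencing yields a LargePage*.
// The loop below is an assumption about typical use, with hypothetical names.
//
//   LargeObjectSpace* space = ...;
//   for (LargePage* page : *space) {
//     HeapObject object = page->GetObject();
//     // ... visit `object`, e.g. during heap verification ...
//   }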

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawBackground(LocalHeap* local_heap, int object_size);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  void PromoteNewLargeObject(LargePage* page);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawBackground(
      LocalHeap* local_heap, int object_size, Executability executable);
};
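
// Illustrative sketch (not part of the original header): AllocateRaw() returns
// an AllocationResult rather than a raw object, so callers are expected to
// check for failure before using the object. The variable names below are
// hypothetical, and the exact retry handling depends on the caller.
//
//   AllocationResult result = old_lo_space->AllocateRaw(object_size);
//   HeapObject object;
//   if (result.To(&object)) {
//     // allocation succeeded; `object` lives on its own LargePage
//   } else {
//     // allocation failed; the caller typically triggers a GC and retries
//   }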

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() const override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);

  void SetCapacity(size_t capacity);

 private:
  size_t capacity_;
};
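
// Illustrative sketch (not part of the original header): FreeDeadObjects()
// takes a predicate that decides whether an object is dead; a caller might
// pass a lambda consulting the marking state after young-generation marking.
// `IsUnmarked` below is a hypothetical helper, not an API from this file.
//
//   new_lo_space->FreeDeadObjects(
//       [](HeapObject object) { return IsUnmarked(object); });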

class CodeLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit CodeLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawBackground(LocalHeap* local_heap, int object_size);

  // Finds a large object page containing the given address, returns nullptr
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

 protected:
  void AddPage(LargePage* page, size_t object_size) override;
  void RemovePage(LargePage* page, size_t object_size) override;

 private:
  static const size_t kInitialChunkMapCapacity = 1024;
  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};
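
// Illustrative sketch (not part of the original header): FindPage() resolves
// an arbitrary address (for example a PC inside a large code object) to its
// LargePage through the internal chunk_map_, returning nullptr when the
// address is not part of this space. `pc` below is hypothetical.
//
//   LargePage* page = code_lo_space->FindPage(pc);
//   if (page != nullptr) {
//     HeapObject code_object = page->GetObject();
//     // ... `pc` points into `code_object` ...
//   }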

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  HeapObject Next() override;

 private:
  LargePage* current_;
};
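
// Illustrative sketch (not part of the original header): the iterator is
// driven by calling Next() until it signals exhaustion; returning an empty
// HeapObject at the end is an assumption based on how V8's ObjectIterators
// are commonly used. Names below are hypothetical.
//
//   LargeObjectSpaceObjectIterator it(space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit `obj` ...
//   }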
248 
249 }  // namespace internal
250 }  // namespace v8
251 
252 #endif  // V8_HEAP_LARGE_SPACES_H_
253