// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_

#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class Isolate;
class LocalHeap;

class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow the typed slot offset in the
  // old-to-old remembered set. Note that this limit is higher than what the
  // assembler already imposes on x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  static LargePage* FromHeapObject(HeapObject o) {
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }

  LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
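
  // Example usage (illustrative sketch; `obj` is a hypothetical HeapObject
  // known to reside on a large page). Each LargePage holds a single object,
  // so GetObject() recovers it from area_start(), and next_page() walks the
  // page list of the owning space:
  //
  //   LargePage* page = LargePage::FromHeapObject(obj);
  //   DCHECK_EQ(obj, page->GetObject());
  //   LargePage* next = page->next_page();  // nullptr at the end of the list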

  // Uncommit memory that is no longer in use by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. Large objects do not move during garbage collections.

class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Frees unmarked objects.
  virtual void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject obj);
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  virtual void AddPage(LargePage* page, size_t object_size);
  virtual void RemovePage(LargePage* page, size_t object_size);

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }
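
  // Example (hedged sketch): walking every LargePage in this space. The loop
  // relies only on first_page()/next_page() declared in this header; the
  // begin()/end() pair additionally enables range-based iteration through
  // LargePageIterator. `space` is a placeholder LargeObjectSpace*:
  //
  //   for (LargePage* page = space->first_page(); page != nullptr;
  //        page = page->next_page()) {
  //     HeapObject object = page->GetObject();
  //     // ... inspect the single object stored on this page ...
  //   }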

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  virtual bool is_off_thread() const { return false; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);

  LargePage* AllocateLargePage(int object_size, Executability executable);

  std::atomic<size_t> size_;          // allocated bytes
  int page_count_;                    // number of chunks
  std::atomic<size_t> objects_size_;  // size of objects
  base::Mutex allocation_mutex_;

 private:
  friend class LargeObjectSpaceObjectIterator;
};

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawBackground(LocalHeap* local_heap, int object_size);
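
  // Example (hedged sketch): allocating a large object on the main thread.
  // This assumes the conventional AllocationResult::To(HeapObject*) pattern
  // used elsewhere in the V8 heap; `old_lo_space` and `object_size` are
  // placeholder names. AllocateRawBackground is the variant for allocation
  // from a background LocalHeap.
  //
  //   AllocationResult result = old_lo_space->AllocateRaw(object_size);
  //   HeapObject object;
  //   if (result.To(&object)) {
  //     // Success: `object` lives on its own LargePage and will not move.
  //   } else {
  //     // Allocation failed; the caller typically triggers a GC and retries.
  //   }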

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  void PromoteNewLargeObject(LargePage* page);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);

  void SetCapacity(size_t capacity);

  // The last allocated object that is not guaranteed to be initialized when
  // the concurrent marker visits it.
  Address pending_object() {
    return pending_object_.load(std::memory_order_relaxed);
  }

  void ResetPendingObject() { pending_object_.store(0); }
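
  // Sketch of the intended protocol (an assumption based on the comment
  // above, not code from this file): AllocateRaw publishes the address of a
  // freshly allocated, not yet initialized object, and the concurrent marker
  // treats that address specially until initialization finishes. `space` is a
  // placeholder NewLargeObjectSpace*, `object` a HeapObject seen by the marker.
  //
  //   if (object.address() == space->pending_object()) {
  //     // The object body may still be uninitialized; the marker must not
  //     // visit its fields yet.
  //   }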
175 
176  private:
177   std::atomic<Address> pending_object_;
178   size_t capacity_;
179 };
180 
181 class CodeLargeObjectSpace : public OldLargeObjectSpace {
182  public:
183   explicit CodeLargeObjectSpace(Heap* heap);
184 
185   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
186   AllocateRaw(int object_size);
187 
188   // Finds a large object page containing the given address, returns nullptr if
189   // such a page doesn't exist.
190   LargePage* FindPage(Address a);
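
  // Example (hedged sketch): resolving an arbitrary code address back to its
  // LargePage, e.g. when the address points into the middle of a large code
  // object. `code_lo_space` and `pc` are placeholder names:
  //
  //   LargePage* page = code_lo_space->FindPage(pc);
  //   if (page != nullptr) {
  //     HeapObject code_object = page->GetObject();
  //   }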

 protected:
  void AddPage(LargePage* page, size_t object_size) override;
  void RemovePage(LargePage* page, size_t object_size) override;

 private:
  static const size_t kInitialChunkMapCapacity = 1024;
  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  HeapObject Next() override;

 private:
  LargePage* current_;
};
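
// Example (hedged sketch): visiting every object in a large object space via
// the iterator above. This assumes Next() returns a null HeapObject once the
// space is exhausted, following the usual ObjectIterator contract; `lo_space`
// is a placeholder pointer.
//
//   LargeObjectSpaceObjectIterator it(lo_space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... process obj ...
//   }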

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_LARGE_SPACES_H_