// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/string-table.h"
#include "src/roots.h"
#include "src/visitors.h"

namespace v8 {

namespace debug {
typedef void (*OutOfMemoryCallback)(void* data);
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
class TestMemoryAllocatorScope;
}  // namespace heap

class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V)     \
  V(ArgumentsMarker)                        \
  V(ArgumentsMarkerMap)                     \
  V(ArrayBufferNeuteringProtector)          \
  V(ArrayIteratorProtector)                 \
  V(BigIntMap)                              \
  V(BlockContextMap)                        \
  V(ObjectBoilerplateDescriptionMap)        \
  V(BooleanMap)                             \
  V(ByteArrayMap)                           \
  V(BytecodeArrayMap)                       \
  V(CatchContextMap)                        \
  V(CellMap)                                \
  V(CodeMap)                                \
  V(DebugEvaluateContextMap)                \
  V(DescriptorArrayMap)                     \
  V(EphemeronHashTableMap)                  \
  V(EmptyByteArray)                         \
  V(EmptyDescriptorArray)                   \
  V(EmptyFixedArray)                        \
  V(EmptyFixedFloat32Array)                 \
  V(EmptyFixedFloat64Array)                 \
  V(EmptyFixedInt16Array)                   \
  V(EmptyFixedInt32Array)                   \
  V(EmptyFixedInt8Array)                    \
  V(EmptyFixedUint16Array)                  \
  V(EmptyFixedUint32Array)                  \
  V(EmptyFixedUint8Array)                   \
  V(EmptyFixedUint8ClampedArray)            \
  V(EmptyOrderedHashMap)                    \
  V(EmptyOrderedHashSet)                    \
  V(EmptyPropertyCell)                      \
  V(EmptyScopeInfo)                         \
  V(EmptyScript)                            \
  V(EmptySloppyArgumentsElements)           \
  V(EmptySlowElementDictionary)             \
  V(EvalContextMap)                         \
  V(Exception)                              \
  V(FalseValue)                             \
  V(FixedArrayMap)                          \
  V(FixedCOWArrayMap)                       \
  V(FixedDoubleArrayMap)                    \
  V(ForeignMap)                             \
  V(FreeSpaceMap)                           \
  V(FunctionContextMap)                     \
  V(GlobalDictionaryMap)                    \
  V(GlobalPropertyCellMap)                  \
  V(HashTableMap)                           \
  V(HeapNumberMap)                          \
  V(HoleNanValue)                           \
  V(InfinityValue)                          \
  V(IsConcatSpreadableProtector)            \
  V(JSMessageObjectMap)                     \
  V(JsConstructEntryCode)                   \
  V(JsEntryCode)                            \
  V(ManyClosuresCell)                       \
  V(ManyClosuresCellMap)                    \
  V(MetaMap)                                \
  V(MinusInfinityValue)                     \
  V(MinusZeroValue)                         \
  V(ModuleContextMap)                       \
  V(ModuleInfoMap)                          \
  V(MutableHeapNumberMap)                   \
  V(NameDictionaryMap)                      \
  V(NanValue)                               \
  V(NativeContextMap)                       \
  V(NoClosuresCellMap)                      \
  V(NoElementsProtector)                    \
  V(NullMap)                                \
  V(NullValue)                              \
  V(NumberDictionaryMap)                    \
  V(OneClosureCellMap)                      \
  V(OnePointerFillerMap)                    \
  V(OptimizedOut)                           \
  V(OrderedHashMapMap)                      \
  V(OrderedHashSetMap)                      \
  V(PreParsedScopeDataMap)                  \
  V(PropertyArrayMap)                       \
  V(ScopeInfoMap)                           \
  V(ScriptContextMap)                       \
  V(ScriptContextTableMap)                  \
  V(SelfReferenceMarker)                    \
  V(SharedFunctionInfoMap)                  \
  V(SimpleNumberDictionaryMap)              \
  V(SloppyArgumentsElementsMap)             \
  V(SmallOrderedHashMapMap)                 \
  V(SmallOrderedHashSetMap)                 \
  V(ArraySpeciesProtector)                  \
  V(TypedArraySpeciesProtector)             \
  V(PromiseSpeciesProtector)                \
  V(StaleRegister)                          \
  V(StringLengthProtector)                  \
  V(StringTableMap)                         \
  V(SymbolMap)                              \
  V(TerminationException)                   \
  V(TheHoleMap)                             \
  V(TheHoleValue)                           \
  V(TransitionArrayMap)                     \
  V(TrueValue)                              \
  V(TwoPointerFillerMap)                    \
  V(UndefinedMap)                           \
  V(UndefinedValue)                         \
  V(UninitializedMap)                       \
  V(UninitializedValue)                     \
  V(UncompiledDataWithoutPreParsedScopeMap) \
  V(UncompiledDataWithPreParsedScopeMap)    \
  V(WeakFixedArrayMap)                      \
  V(WeakArrayListMap)                       \
  V(WithContextMap)                         \
  V(empty_string)                           \
  PRIVATE_SYMBOL_LIST(V)

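// Illustrative sketch (an assumption about typical use, not verbatim from
// this header): predicates such as Heap::RootIsImmortalImmovable, declared
// below, can be implemented by instantiating this X-macro with a
// case-generating V:
//
//   switch (root_index) {
// #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
//     IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
// #undef IMMORTAL_IMMOVABLE_ROOT
//       return true;
//     default:
//       return false;
//   }
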
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class RootVisitor;
class ScavengeJob;
class Scavenger;
class Space;
class StoreBuffer;
class StressScavengeObserver;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };

enum class FixedArrayVisitationMode { kRegular, kIncremental };

enum class TraceRetainingPathMode { kEnabled, kDisabled };

enum class RetainingPathOption { kDefault, kTrackEphemeronPath };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21,
  kExternalFinalize = 22
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
254 
255 class AllocationResult {
256  public:
257   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
258     return AllocationResult(space);
259   }
260 
261   // Implicit constructor from Object*.
AllocationResult(Object * object)262   AllocationResult(Object* object)  // NOLINT
263       : object_(object) {
264     // AllocationResults can't return Smis, which are used to represent
265     // failure and the space to retry in.
266     CHECK(!object->IsSmi());
267   }
268 
AllocationResult()269   AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
270 
IsRetry()271   inline bool IsRetry() { return object_->IsSmi(); }
272   inline HeapObject* ToObjectChecked();
273   inline AllocationSpace RetrySpace();
274 
275   template <typename T>
To(T ** obj)276   bool To(T** obj) {
277     if (IsRetry()) return false;
278     *obj = T::cast(object_);
279     return true;
280   }
281 
282  private:
AllocationResult(AllocationSpace space)283   explicit AllocationResult(AllocationSpace space)
284       : object_(Smi::FromInt(static_cast<int>(space))) {}
285 
286   Object* object_;
287 };
288 
289 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
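
// Illustrative sketch (assumption, not part of the original header): callers
// typically consume an AllocationResult through To() and fall back to a retry
// path on failure, along the lines of:
//
//   AllocationResult result = AllocateRaw(size_in_bytes, NEW_SPACE);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Allocation failed; result.RetrySpace() names the space to collect
//     // in before retrying. AllocateRaw stands in for whatever allocation
//     // entry point the caller uses.
//   }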

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = nullptr;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

class Heap {
 public:
  // Declare all the root indices.  This defines the root list order.
  // clang-format off
  enum RootListIndex {
#define DECL(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(DECL)
#undef DECL

#define DECL(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(DECL)
#undef DECL

#define DECL(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(DECL)
#undef DECL

#define DECL(name, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(DECL)
    WELL_KNOWN_SYMBOL_LIST(DECL)
#undef DECL

#define DECL(accessor_name, AccessorName) k##AccessorName##AccessorRootIndex,
    ACCESSOR_INFO_LIST(DECL)
#undef DECL

#define DECL(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECL)
#undef DECL

#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
    ALLOCATION_SITE_LIST(DECL)
#undef DECL

#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
    DATA_HANDLER_LIST(DECL)
#undef DECL

    kStringTableRootIndex,

#define DECL(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(DECL)
#undef DECL

    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
  // clang-format on
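
  // Illustrative note (assumption about the expansion): a STRONG_ROOT_LIST
  // entry of the form V(Map, meta_map, MetaMap) becomes the enumerator
  // kMetaMapRootIndex under the first DECL above, so the enum order is
  // exactly the root list order.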

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState {
    NOT_IN_GC,
    SCAVENGE,
    MARK_COMPACT,
    MINOR_MARK_COMPACT,
    TEAR_DOWN
  };

  using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;

  // Taking this mutex prevents the GC from entering a phase that relocates
  // object references.
  base::Mutex* relocation_mutex() { return &relocation_mutex_; }

  // Support for partial snapshots.  After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef std::vector<Chunk> Reservation;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif

  // Semi-space size needs to be a multiple of page size.
  static const size_t kMinSemiSpaceSizeInKB =
      1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
  static const size_t kMaxSemiSpaceSizeInKB =
      16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
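
  // Worked example (illustrative): assuming 64-bit pointers
  // (kPointerMultiplier == 2) and kPageSizeBits == 19, i.e. 512 KB pages,
  // these bounds evaluate to 1 * 2 * 512 KB = 1 MB and
  // 16 * 2 * 512 KB = 16 MB respectively.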

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);
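
  // Illustrative example (assumption): on a 32-bit heap with kDoubleAligned
  // allocation, an address that is 4-byte- but not 8-byte-aligned needs a
  // kPointerSize filler, so GetFillToAlign returns kPointerSize there and 0
  // for an address that is already 8-byte-aligned.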

  void FatalProcessOutOfMemory(const char* location);

  V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  // Zapping is needed for heap verification and is always done in debug
  // builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static uintptr_t ZapValue() {
    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
#else
    return SCAVENGER;
#endif  // ENABLE_MINOR_MC
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  // Copy a block of memory from src to dst. The block size must be a multiple
  // of the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code* host);
  V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
                                                        Address slot,
                                                        HeapObject* value);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
      Heap* heap, FixedArray* array, int offset, int length);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
      Code* host, RelocInfo* rinfo, HeapObject* value);
  V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
                                                   Address slot,
                                                   HeapObject* value);
  V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
      Heap* heap, HeapObject* object);
  V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code* host,
                                                          RelocInfo* rinfo,
                                                          HeapObject* value);
  V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len,
                    WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo. If the memory after the object header of
  // the filler should be cleared, pass in kClearFreedMemory. The default is
  // kDontClearFreedMemory.
  V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(
      Address addr, int size, ClearRecordedSlots clear_slots_mode,
      ClearFreedMemoryMode clear_memory_mode =
          ClearFreedMemoryMode::kDontClearFreedMemory);
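
  // Illustrative sketch (assumption): a caller trimming the tail of an object
  // in old space, where old-to-new slots may have been recorded over the
  // freed area, would fill the gap with
  //
  //   CreateFillerObjectAt(free_start, free_size, ClearRecordedSlots::kYes);
  //
  // while freshly allocated memory that cannot hold recorded slots can pass
  // ClearRecordedSlots::kNo.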

  template <typename T>
  void CreateFillerForArray(T* object, int elements_to_trim, int bytes_to_trim);

  bool CanMoveObjectStart(HeapObject* object);

  static bool IsImmovable(HeapObject* object);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
  void RightTrimWeakFixedArray(WeakFixedArray* obj, int elements_to_trim);

  // Converts the given boolean condition to JavaScript boolean value.
  inline Oddball* ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  // Traverse all the allocation sites [nested_site and weak_next] in the list
  // and for each one call the visitor.
  void ForeachAllocationSite(Object* list,
                             std::function<void(AllocationSite*)> visitor);

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  bool write_protect_code_memory() const { return write_protect_code_memory_; }

  uintptr_t code_space_memory_modification_scope_depth() {
    return code_space_memory_modification_scope_depth_;
  }

  void increment_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_++;
  }

  void decrement_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_--;
  }

  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
  void UnprotectAndRegisterMemoryChunk(HeapObject* object);
  void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
  V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

  void EnableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = true;
  }

  void DisableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = false;
  }

  bool unprotected_memory_chunks_registry_enabled() {
    return unprotected_memory_chunks_registry_enabled_;
  }

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);
  bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return nullptr.
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address);

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline uint64_t HashSeed();

  inline int NextScriptId();
  inline int NextDebuggingId();
  inline int GetNextTemplateSerialNumber();

  void SetSerializedObjects(FixedArray* objects);
  void SetSerializedGlobalProxySizes(FixedArray* sizes);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  int64_t external_memory() { return external_memory_; }
  void update_external_memory(int64_t delta) { external_memory_ += delta; }

  void update_external_memory_concurrently_freed(intptr_t freed) {
    external_memory_concurrently_freed_ += freed;
  }

  void account_external_memory_concurrently_freed() {
    external_memory_ -= external_memory_concurrently_freed_;
    external_memory_concurrently_freed_ = 0;
  }

  void ProcessMovedExternalString(Page* old_page, Page* new_page,
                                  ExternalString* string);

  void CompactWeakArrayLists(PretenureFlag pretenure);

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  inline bool CanAllocateInReadOnlySpace();
  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  bool ShouldOptimizeForMemoryUsage();

  bool HighMemoryPressure() {
    return memory_pressure_level_ != MemoryPressureLevel::kNone;
  }

  void RestoreHeapLimit(size_t heap_limit) {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_, Max(heap_limit, min_limit));
  }
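
  // Worked example (illustrative): with 400 MB of live objects, min_limit is
  // 400 MB + 100 MB = 500 MB, so a requested heap_limit of 256 MB would be
  // raised to 500 MB before being applied.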

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap sizes
  // max_semi_space_size_in_kb: maximum semi-space size in KB
  // max_old_generation_size_in_mb: maximum old generation size in MB
  // code_range_size_in_mb: code range size in MB
  void ConfigureHeap(size_t max_semi_space_size_in_kb,
                     size_t max_old_generation_size_in_mb,
                     size_t code_range_size_in_mb);
  void ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  void SetUp();

  // (Re-)Initialize hash seed from flag or RNG.
  void InitializeHashSeed();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are
  // nullptr.
  void CreateObjectStats();

  // Sets the TearDown state, so no new GC tasks get posted.
  void StartTearDown();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  CodeSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
  ReadOnlySpace* read_only_space() { return read_only_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns the name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_;
  }

  ArrayBufferCollector* array_buffer_collector() {
    return array_buffer_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================
  friend class ReadOnlyRoots;

 public:
// Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
  inline Map* name##_map();
  DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
#undef DATA_HANDLER_MAP_ACCESSOR

#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
  inline AccessorInfo* accessor_name##_accessor();
  ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
#undef ACCESSOR_INFO_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }
  template <typename T>
  bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
    Object** const handle_location = bit_cast<Object**>(handle.address());
    if (handle_location >= &roots_[kRootListLength]) return false;
    if (handle_location < &roots_[0]) return false;
    *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
    return true;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  ExternalReferenceTable* external_reference_table() {
    DCHECK(external_reference_table_.is_initialized());
    return &external_reference_table_;
  }

  static constexpr int roots_to_external_reference_table_offset() {
    return kRootsExternalReferenceTableOffset;
  }

  static constexpr int roots_to_builtins_offset() {
    return kRootsBuiltinsOffset;
  }

  static constexpr int root_register_addressable_end_offset() {
    return kRootRegisterAddressableEndOffset;
  }

  Address root_register_addressable_end() {
    return reinterpret_cast<Address>(roots_array_start()) +
           kRootRegisterAddressableEndOffset;
  }

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void SetRootCodeStubs(SimpleNumberDictionary* value);

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  void SetMessageListeners(TemplateList* value) {
    roots_[kMessageListenersRootIndex] = value;
  }

  // Set the stack limit in the roots_ array.  Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  Map* MapForFixedTypedArray(ElementsKind elements_kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  bool IsDeserializeLazyHandler(Code* code);
  void SetDeserializeLazyHandler(Code* code);
  void SetDeserializeLazyHandlerWide(Code* code);
  void SetDeserializeLazyHandlerExtraWide(Code* code);

  void SetBuiltinsConstantsTable(FixedArray* cache);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  V8_EXPORT_PRIVATE bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  V8_EXPORT_PRIVATE void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
      GetExternallyAllocatedMemoryInBytesCallback;

  void SetGetExternallyAllocatedMemoryInBytesCallback(
      GetExternallyAllocatedMemoryInBytesCallback callback) {
    external_memory_callback_ = callback;
  }

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Builtins. =================================================================
  // ===========================================================================

  Code* builtin(int index);
  Address builtin_address(int index);
  void set_builtin(int index, HeapObject* builtin);

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  void IterateRoots(RootVisitor* v, VisitMode mode);
  void IterateStrongRoots(RootVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list.  Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(RootVisitor* v);
  // Iterates over weak string tables.
  void IterateWeakRoots(RootVisitor* v, VisitMode mode);
  // Iterates over weak global handles.
  void IterateWeakGlobalHandles(RootVisitor* v);
  // Iterates over builtins.
  void IterateBuiltins(RootVisitor* v);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Used to query the incremental marking status from generated code.
  Address* IsMarkingFlagAddress() {
    return reinterpret_cast<Address*>(&is_marking_flag_);
  }

  void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }

  Address* store_buffer_top_address();
  static intptr_t store_buffer_mask_constant();
  static Address store_buffer_overflow_function_address();

  void ClearRecordedSlot(HeapObject* object, Object** slot);
  void ClearRecordedSlotRange(Address start, Address end);

  bool HasRecordedSlot(HeapObject* object, Object** slot);

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  int GCFlagsForIncrementalMarking() {
    return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
                                          : kNoGCFlags;
  }

  // Start incremental marking and ensure that the idle time handler can
  // perform incremental steps.
  void StartIdleIncrementalMarking(
      GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
  // Synchronously finalizes incremental marking.
  void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);

  void RegisterDeserializedObjectsForBlackAllocation(
      Reservation* reservations, const std::vector<HeapObject*>& large_objects,
      const std::vector<Address>& maps);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }

  // The runtime uses this function to notify the GC of potentially unsafe
  // object layout changes that require special synchronization with the
  // concurrent marker. The old size is the size of the object before the
  // layout change.
  void NotifyObjectLayoutChange(HeapObject* object, int old_size,
                                const DisallowHeapAllocation&);

#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
#endif

  // ===========================================================================
  // Deoptimization support API. ===============================================
  // ===========================================================================

  // Setters for code offsets of well-known deoptimization targets.
  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  void SetInterpreterEntryReturnPCOffset(int pc_offset);

  // Invalidates references in the given {code} object that are directly
  // embedded within the instruction stream. Mutates write-protected code.
  void InvalidateCodeEmbeddedObjects(Code* code);

  // Invalidates references in the given {code} object that are referenced
  // transitively from the deoptimization data. Mutates write-protected code.
  void InvalidateCodeDeoptimizationData(Code* code);

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites();

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
    return local_embedder_heap_tracer_;
  }
  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  void TracePossibleWrapper(JSObject* js_object);
  void RegisterExternallyReferencedObject(Object** object);
  void SetEmbedderStackStateForNextFinalizaton(
      EmbedderHeapTracer::EmbedderStackState stack_state);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Called when a string's resource is changed. The size of the payload is
  // passed as an argument to the method.
  inline void UpdateExternalString(String* string, size_t old_payload,
                                   size_t new_payload);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  static inline bool InNewSpace(Object* object);
  static inline bool InNewSpace(MaybeObject* object);
  static inline bool InNewSpace(HeapObject* heap_object);
  static inline bool InFromSpace(Object* object);
  static inline bool InFromSpace(MaybeObject* object);
  static inline bool InFromSpace(HeapObject* heap_object);
  static inline bool InToSpace(Object* object);
  static inline bool InToSpace(MaybeObject* object);
  static inline bool InToSpace(HeapObject* heap_object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object* object);

  // Returns whether the object resides in read-only space.
  inline bool InReadOnlySpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool ContainsSlow(Address addr);
  bool InSpaceSlow(Address addr, AllocationSpace space);
  inline bool InNewSpaceSlow(Address address);
  inline bool InOldSpaceSlow(Address address);

  // Find the heap which owns this HeapObject. Should never be called for
  // objects in RO space.
  static inline Heap* FromWritableHeapObject(const HeapObject* obj);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note, however, that the following methods fail gracefully when
  // the bounds are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

  // The total number of native context objects on the heap.
  size_t NumberOfNativeContexts();
  // The total number of native contexts that were detached but were not
  // garbage collected yet.
  size_t NumberOfDetachedContexts();

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  size_t MaxReserved();
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
  size_t MaxOldGenerationSize() { return max_old_generation_size_; }

  V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
      uint64_t physical_memory);

  static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
    const uint64_t min_physical_memory = 512 * MB;
    const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);

    uint64_t capped_physical_memory =
        Max(Min(physical_memory, max_physical_memory), min_physical_memory);
    // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
    size_t semi_space_size_in_kb =
        static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
                             (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
                                (max_physical_memory - min_physical_memory) +
                            kMinSemiSpaceSizeInKB);
    return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
  }
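
  // Worked example (illustrative, using the 64-bit constants above where
  // kMinSemiSpaceSizeInKB is 1 MB and kMaxSemiSpaceSizeInKB is 16 MB): for
  // physical_memory = 1.75 GB, the capped value lies halfway between 512 MB
  // and 3 GB, so interpolation yields 0.5 * (16384 - 1024) + 1024 = 8704 KB,
  // which RoundUp then aligns to a multiple of the page size in KB.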

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  size_t Capacity();

  // Returns the capacity of the old generation.
  size_t OldGenerationCapacity();

  // Returns the amount of memory currently committed for the heap and memory
  // held alive by the unmapper.
  size_t CommittedMemoryOfHeapAndUnmapper();

  // Returns the amount of memory currently committed for the heap.
  size_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  size_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  size_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns the size of all objects residing in the heap.
  size_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
promoted_objects_size()1255   inline size_t promoted_objects_size() { return promoted_objects_size_; }
1256 
IncrementSemiSpaceCopiedObjectSize(size_t object_size)1257   inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
1258     semi_space_copied_object_size_ += object_size;
1259   }
semi_space_copied_object_size()1260   inline size_t semi_space_copied_object_size() {
1261     return semi_space_copied_object_size_;
1262   }
1263 
SurvivedNewSpaceObjectSize()1264   inline size_t SurvivedNewSpaceObjectSize() {
1265     return promoted_objects_size_ + semi_space_copied_object_size_;
1266   }
1267 
IncrementNodesDiedInNewSpace()1268   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
1269 
IncrementNodesCopiedInNewSpace()1270   inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
1271 
IncrementNodesPromoted()1272   inline void IncrementNodesPromoted() { nodes_promoted_++; }
1273 
IncrementYoungSurvivorsCounter(size_t survived)1274   inline void IncrementYoungSurvivorsCounter(size_t survived) {
1275     survived_last_scavenge_ = survived;
1276     survived_since_last_expansion_ += survived;
1277   }
1278 
OldGenerationObjectsAndPromotedExternalMemorySize()1279   inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() {
1280     return OldGenerationSizeOfObjects() + PromotedExternalMemorySize();
1281   }
1282 
1283   inline void UpdateNewSpaceAllocationCounter();
1284 
1285   inline size_t NewSpaceAllocationCounter();
1286 
1287   // This should be used only for testing.
set_new_space_allocation_counter(size_t new_value)1288   void set_new_space_allocation_counter(size_t new_value) {
1289     new_space_allocation_counter_ = new_value;
1290   }
1291 
UpdateOldGenerationAllocationCounter()1292   void UpdateOldGenerationAllocationCounter() {
1293     old_generation_allocation_counter_at_last_gc_ =
1294         OldGenerationAllocationCounter();
1295     old_generation_size_at_last_gc_ = 0;
1296   }
1297 
OldGenerationAllocationCounter()1298   size_t OldGenerationAllocationCounter() {
1299     return old_generation_allocation_counter_at_last_gc_ +
1300            PromotedSinceLastGC();
1301   }
1302 
1303   // This should be used only for testing.
set_old_generation_allocation_counter_at_last_gc(size_t new_value)1304   void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
1305     old_generation_allocation_counter_at_last_gc_ = new_value;
1306   }
1307 
PromotedSinceLastGC()1308   size_t PromotedSinceLastGC() {
1309     size_t old_generation_size = OldGenerationSizeOfObjects();
1310     DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
1311     return old_generation_size - old_generation_size_at_last_gc_;
1312   }
1313 
1314   // This is called by the sweeper when it discovers more free space
1315   // than expected at the end of the preceding GC.
NotifyRefinedOldGenerationSize(size_t decreased_bytes)1316   void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
1317     if (old_generation_size_at_last_gc_ != 0) {
1318       // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|.
1319       // Adjust old_generation_size_at_last_gc_ too, so that PromotedSinceLastGC
1320       // continues to increase monotonically, rather than decreasing here.
1321       DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
1322       old_generation_size_at_last_gc_ -= decreased_bytes;
1323     }
1324   }
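
  // Worked example (illustrative numbers, not from the source): if the old
  // generation held 100 MB at the last GC and now holds 130 MB,
  // PromotedSinceLastGC() returns 30 MB. Should the sweeper then report 10 MB
  // of extra free space, NotifyRefinedOldGenerationSize(10 MB) lowers the
  // baseline to 90 MB, so the counter reads 120 MB - 90 MB = 30 MB instead of
  // dropping to 20 MB.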

  int gc_count() const { return gc_count_; }

  // Returns the size of objects residing in non-new spaces.
  // Excludes external memory held by those objects.
  size_t OldGenerationSizeOfObjects();

  // ===========================================================================
  // Prologue/epilogue callback methods. =======================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
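
  // Hedged usage sketch (not part of this header): |heap| and |my_data| are
  // hypothetical; the callback signature is the v8::Isolate::GCCallbackWithData
  // typedef from include/v8.h:
  //
  //   void OnMarkCompact(v8::Isolate* isolate, GCType type,
  //                      GCCallbackFlags flags, void* data) { /* ... */ }
  //
  //   heap->AddGCPrologueCallback(OnMarkCompact, kGCTypeMarkSweepCompact,
  //                               my_data);
  //   ...
  //   heap->RemoveGCPrologueCallback(OnMarkCompact, my_data);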

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  V8_WARN_UNUSED_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
                                                      int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over-allocated memory is iterable.
  V8_WARN_UNUSED_RESULT HeapObject* AlignWithFiller(
      HeapObject* object, int object_size, int allocation_size,
      AllocationAlignment alignment);
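
  // Worked example (an assumption for illustration, 32-bit target with
  // kDoubleAligned): the caller over-allocates allocation_size =
  // object_size + kPointerSize. If |object| starts at a misaligned address,
  // a one-word filler is placed before it; otherwise the spare word behind
  // the object becomes a filler, keeping the over-allocated memory iterable.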

  // ===========================================================================
  // ArrayBuffer tracking. =====================================================
  // ===========================================================================

  // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
  // in the registration/unregistration APIs. Consider dropping the "New" from
  // "RegisterNewArrayBuffer" because one can re-register a previously
  // unregistered buffer, too, and the name is confusing.
  void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
  void UnregisterArrayBuffer(JSArrayBuffer* buffer);

  // ===========================================================================
  // Allocation site tracking. =================================================
  // ===========================================================================

  // Updates the AllocationSite of a given {object}. The entry (including the
  // count) is cached on the local pretenuring feedback.
  inline void UpdateAllocationSite(
      Map* map, HeapObject* object,
      PretenuringFeedbackMap* pretenuring_feedback);

  // Merges local pretenuring feedback into the global one. Note that this
  // method needs to be called after evacuation, as allocation sites may be
  // evacuated and this method resolves forward pointers accordingly.
  void MergeAllocationSitePretenuringFeedback(
      const PretenuringFeedbackMap& local_pretenuring_feedback);

  // ===========================================================================
  // Allocation tracking. ======================================================
  // ===========================================================================

  // Adds {new_space_observer} to new space and {observer} to any other space.
  void AddAllocationObserversToAllSpaces(
      AllocationObserver* observer, AllocationObserver* new_space_observer);

  // Removes {new_space_observer} from new space and {observer} from any other
  // space.
  void RemoveAllocationObserversFromAllSpaces(
      AllocationObserver* observer, AllocationObserver* new_space_observer);

  bool allocation_step_in_progress() { return allocation_step_in_progress_; }
  void set_allocation_step_in_progress(bool val) {
    allocation_step_in_progress_ = val;
  }

  // ===========================================================================
  // Heap object allocation tracking. ==========================================
  // ===========================================================================

  void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
  void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
  bool has_heap_object_allocation_tracker() const {
    return !allocation_trackers_.empty();
  }

  // ===========================================================================
  // Retaining path tracking. ==================================================
  // ===========================================================================

  // Adds the given object to the weak table of retaining path targets.
  // On each GC if the marker discovers the object, it will print the retaining
  // path. This requires the --track-retaining-path flag.
  void AddRetainingPathTarget(Handle<HeapObject> object,
                              RetainingPathOption option);

  // ===========================================================================
  // Stack frame support. ======================================================
  // ===========================================================================

  // Returns the Code object for a given interior pointer. Returns nullptr if
  // {inner_pointer} is not contained within a Code object.
  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);

  // Returns true if {addr} is contained within {code} and false otherwise.
  // Mostly useful for debugging.
  bool GcSafeCodeContains(HeapObject* code, Address addr);

// =============================================================================
#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();
  void VerifyRememberedSetFor(HeapObject* object);
#endif

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping();
  void VerifyCountersBeforeConcurrentSweeping();

  void Print();
  void PrintHandles();

  // Report code statistics.
  void ReportCodeStatistics(const char* title);
#endif
  void* GetRandomMmapAddr() {
    void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
    // The Darwin kernel [as of macOS 10.12.5] does not clean up page
    // directory entries [PDE] created from mmap or mach_vm_allocate, even
    // after the region is destroyed. Using a virtual address space that is
    // too large causes a leak of about 1 wired [can never be paged out] page
    // per call to mmap(). The page is only reclaimed when the process is
    // killed. Confine the hint to a 32-bit section of the virtual address
    // space. See crbug.com/700928.
    uintptr_t offset =
        reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
        kMmapRegionMask;
    result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif  // V8_OS_MACOSX
#endif  // V8_TARGET_ARCH_X64
    return result;
  }
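
  // Example (illustrative arithmetic): with kMmapRegionMask = 0xFFFFFFFF, a
  // random hint of 0x7fff12345678 yields offset = 0x12345678, so the final
  // hint always falls inside the 4 GB region starting at mmap_region_base_.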

  static const char* GarbageCollectionReasonToString(
      GarbageCollectionReason gc_reason);

  // Calculates the number of entries for the full sized number to string
  // cache.
  inline int MaxNumberToStringCacheSize() const;

 private:
  class SkipStoreBufferScope;

  typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                        Object** pointer);

  // The external string table is the place where all external strings are
  // registered.  We need to keep track of such strings to properly
  // finalize them.
  class ExternalStringTable {
   public:
    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

    // Registers an external string.
    inline void AddString(String* string);
    bool Contains(HeapObject* obj);

    void IterateAll(RootVisitor* v);
    void IterateNewSpaceStrings(RootVisitor* v);
    void PromoteAllNewSpaceStrings();

    // Restores internal invariant and gets rid of collected strings. Must be
    // called after each Iterate*() that modified the strings.
    void CleanUpAll();
    void CleanUpNewSpaceStrings();

    // Finalize all registered external strings and clear tables.
    void TearDown();

    void UpdateNewSpaceReferences(
        Heap::ExternalStringTableUpdaterCallback updater_func);
    void UpdateReferences(
        Heap::ExternalStringTableUpdaterCallback updater_func);

   private:
    void Verify();
    void VerifyNewSpace();

    Heap* const heap_;

    // To speed up scavenge collections, new space strings are kept
    // separate from old space strings.
    std::vector<Object*> new_space_strings_;
    std::vector<Object*> old_space_strings_;

    DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
  };

  struct StrongRootsList;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct GCCallbackTuple {
    GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
                    void* data)
        : callback(callback), gc_type(gc_type), data(data) {}

    bool operator==(const GCCallbackTuple& other) const;
    GCCallbackTuple& operator=(const GCCallbackTuple& other);

    v8::Isolate::GCCallbackWithData callback;
    GCType gc_type;
    void* data;
  };

  static const int kInitialStringTableSize = StringTable::kMinCapacity;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  static const int kRememberedUnmappedPages = 128;

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;
  static const int kOldSurvivalRateLowThreshold = 10;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  static const int kInitialFeedbackCapacity = 256;

  static const int kMaxScavengerTasks = 8;

  Heap();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  // Selects the proper allocation space based on the pretenuring decision.
  static AllocationSpace SelectSpace(PretenureFlag pretenure) {
    switch (pretenure) {
      case TENURED_READ_ONLY:
        return RO_SPACE;
      case TENURED:
        return OLD_SPACE;
      case NOT_TENURED:
        return NEW_SPACE;
      default:
        UNREACHABLE();
    }
  }

  static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
    return 0;
  }

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value);
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  StoreBuffer* store_buffer() { return store_buffer_; }

  void set_current_gc_flags(int flags) {
    current_gc_flags_ = flags;
    DCHECK(!ShouldFinalizeIncrementalMarking() ||
           !ShouldAbortIncrementalMarking());
  }

  inline bool ShouldReduceMemory() const {
    return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
  }

  inline bool ShouldAbortIncrementalMarking() const {
    return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
  }

  inline bool ShouldFinalizeIncrementalMarking() const {
    return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
  }

  int NumberOfScavengeTasks();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects.  May cause a GC.
  void MakeHeapIterable();

  // Performs garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  bool CreateInitialMaps();
  void CreateInternalAccessorInfoObjects();
  void CreateInitialObjects();

  // These five Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  V8_NOINLINE void CreateJSEntryStub();
  V8_NOINLINE void CreateJSConstructEntryStub();
  V8_NOINLINE void CreateJSRunMicrotasksEntryStub();

  void CreateFixedStubs();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace();

  // Fill in bogus values in from space.
  void ZapFromSpace();

  // Zaps the memory of a code object.
  void ZapCodeObject(Address start_address, int size_in_bytes);

  // Deopts all code that contains allocation instructions which are tenured
  // or not tenured. Moreover, it clears the pretenuring allocation site
  // statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Record statistics after garbage collection.
  void ReportStatisticsAfterGC();

  // Flush the number to string cache.
  void FlushNumberStringCache();

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
  double YoungGenerationMutatorUtilization();
  double OldGenerationMutatorUtilization();

  void ReduceNewSpaceSize();

  GCIdleTimeHeapState ComputeHeapState();

  bool PerformIdleTimeAction(GCIdleTimeAction action,
                             GCIdleTimeHeapState heap_state,
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);

  int NextAllocationTimeout(int current_timeout = 0);
  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  void PrintAllocationsHash();

  void PrintMaxMarkingLimitReached();
  void PrintMaxNewSpaceSizeReached();

  int NextStressMarkingLimit();

  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

  void CompactRetainedMaps(WeakArrayList* retained_maps);

  void CollectGarbageOnMemoryPressure();

  bool InvokeNearHeapLimitCallback();

  void ComputeFastPromotionMode();

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void FinalizeIncrementalMarkingIncrementally(
      GarbageCollectionReason gc_reason);

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  //   memory reduction
  HistogramTimer* GCTypeTimer(GarbageCollector collector);
  HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method, objects in old space must not move.
  void ProcessPretenuringFeedback();

  // Removes an entry from the global pretenuring storage.
  void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);

  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC.  Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a major collection in the whole heap.
  void MarkCompact();
  // Performs a minor collection of just the young generation.
  void MinorMarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in new generation.
  void Scavenge();
  void EvacuateYoungGeneration();

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  inline size_t OldGenerationSpaceAvailable() {
    if (old_generation_allocation_limit_ <=
        OldGenerationObjectsAndPromotedExternalMemorySize())
      return 0;
    return old_generation_allocation_limit_ -
           static_cast<size_t>(
               OldGenerationObjectsAndPromotedExternalMemorySize());
  }

  // We allow incremental marking to overshoot the allocation limit for
  // performance reasons. If the overshoot is too large then we are more
  // eager to finalize incremental marking.
  inline bool AllocationLimitOvershotByLargeMargin() {
    // This guards against too eager finalization in small heaps.
    // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
    size_t kMarginForSmallHeaps = 32u * MB;
    if (old_generation_allocation_limit_ >=
        OldGenerationObjectsAndPromotedExternalMemorySize())
      return false;
    uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
                         old_generation_allocation_limit_;
    // Overshoot margin is 50% of allocation limit or half-way to the max heap
    // with special handling of small heaps.
    uint64_t margin =
        Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
            (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
    return overshoot >= margin;
  }
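
  // Worked example (illustrative numbers): with a 64 MB allocation limit, a
  // 112 MB old generation and a 512 MB max heap, overshoot = 48 MB and
  // margin = Min(Max(32 MB, 32 MB), (512 MB - 64 MB) / 2) = 32 MB, so the
  // limit counts as overshot by a large margin.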

  void UpdateTotalGCTime(double duration);

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  bool IsIneffectiveMarkCompact(size_t old_generation_size,
                                double mutator_utilization);
  void CheckIneffectiveMarkCompact(size_t old_generation_size,
                                   double mutator_utilization);

  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

  HeapController* heap_controller() { return heap_controller_; }
  MemoryReducer* memory_reducer() { return memory_reducer_; }

  // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
  // This constant limits the effect of load RAIL mode on GC.
  // The value is arbitrary and chosen as the largest load time observed in
  // v8 browsing benchmarks.
  static const int kMaxLoadTimeMs = 7000;

  bool ShouldOptimizeForLoadTime();

  size_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_ != 0; }

  bool CanExpandOldGeneration(size_t size);

  bool ShouldExpandOldGenerationOnSlowAllocation();

  enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };

  HeapGrowingMode CurrentHeapGrowingMode();

  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
  IncrementalMarkingLimit IncrementalMarkingLimitReached();

  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);

  // ===========================================================================
  // HeapIterator helpers. =====================================================
  // ===========================================================================

  void heap_iterator_start() { heap_iterator_depth_++; }

  void heap_iterator_end() { heap_iterator_depth_--; }

  bool in_heap_iterator() { return heap_iterator_depth_ > 0; }

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Allocates a JS Map in the heap.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
              int inobject_properties = 0);

  // Allocate an uninitialized object.  The memory is non-executable if the
  // hardware and OS allow.  This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);

  // This method will try to perform an allocation of a given size in a given
  // space. If the allocation fails, a regular full garbage collection is
  // triggered and the allocation is retried. This is performed multiple times.
  // If after that retry procedure the allocation still fails, nullptr is
  // returned.
  HeapObject* AllocateRawWithLightRetry(
      int size, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);

  // This method will try to perform an allocation of a given size in a given
  // space. If the allocation fails, a regular full garbage collection is
  // triggered and the allocation is retried. This is performed multiple times.
  // If after that retry procedure the allocation still fails, a "hammer"
  // garbage collection is triggered which tries to significantly reduce
  // memory. If the allocation still fails after that, a fatal error is thrown.
  HeapObject* AllocateRawWithRetryOrFail(
      int size, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);
  HeapObject* AllocateRawCodeInLargeObjectSpace(int size);

  // Allocates a heap object based on the map.
  V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
                                                  AllocationSpace space);

  // Takes a code object and checks if it is on memory which is not subject to
  // compaction. This method will return a new code object on an immovable
  // memory location if the original code object was movable.
  HeapObject* EnsureImmovableCode(HeapObject* heap_object, int object_size);

  // Allocates a partial map for bootstrapping.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocatePartialMap(InstanceType instance_type, int instance_size);

  void FinalizePartialMap(Map* map);

  // Allocate empty fixed typed array of given type.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  void set_force_oom(bool value) { force_oom_ = value; }

  // ===========================================================================
  // Retaining path tracing. ===================================================
  // ===========================================================================

  void AddRetainer(HeapObject* retainer, HeapObject* object);
  void AddEphemeronRetainer(HeapObject* retainer, HeapObject* object);
  void AddRetainingRoot(Root root, HeapObject* object);
  // Returns true if the given object is a target of retaining path tracking.
  // Stores the option corresponding to the object in the provided *option.
  bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
  void PrintRetainingPath(HeapObject* object, RetainingPathOption option);

  // The amount of external memory registered through the API.
  int64_t external_memory_;

  // The limit when to trigger memory pressure from the API.
  int64_t external_memory_limit_;

  // Caches the amount of external memory registered at the last MC.
  int64_t external_memory_at_last_mark_compact_;

  // The amount of memory that has been freed concurrently.
  std::atomic<intptr_t> external_memory_concurrently_freed_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  // This table is accessed from builtin code compiled into the snapshot, and
  // thus its offset from roots_ must remain static. This is verified in
  // Isolate::Init() using runtime checks.
  static constexpr int kRootsExternalReferenceTableOffset =
      kRootListLength * kPointerSize;
  ExternalReferenceTable external_reference_table_;

  // As external references above, builtins are accessed through an offset from
  // the roots register. Its offset from roots_ must remain static. This is
  // verified in Isolate::Init() using runtime checks.
  static constexpr int kRootsBuiltinsOffset =
      kRootsExternalReferenceTableOffset +
      ExternalReferenceTable::SizeInBytes();
  Object* builtins_[Builtins::builtin_count];

  // kRootRegister may be used to address any location that starts at the
  // Isolate and ends at this point. Fields past this point are not guaranteed
  // to live at a static offset from kRootRegister.
  static constexpr int kRootRegisterAddressableEndOffset =
      kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;

  size_t code_range_size_;
  size_t max_semi_space_size_;
  size_t initial_semispace_size_;
  size_t max_old_generation_size_;
  size_t initial_max_old_generation_size_;
  size_t initial_old_generation_size_;
  bool old_generation_size_configured_;
  size_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  size_t survived_since_last_expansion_;

  // ... and since the last scavenge.
  size_t survived_last_scavenge_;

  // This is not the depth of nested AlwaysAllocateScope's but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
  std::atomic<size_t> always_allocate_scope_count_;

  // Stores the memory pressure level that is set by MemoryPressureNotification
  // and reset by a mark-compact garbage collection.
  std::atomic<MemoryPressureLevel> memory_pressure_level_;

  std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
      near_heap_limit_callbacks_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  // The length of the retained_maps array at the time of context disposal.
  // This separates maps in the retained_maps array that were created before
  // and after context disposal.
  int number_of_disposed_maps_;

  NewSpace* new_space_;
  OldSpace* old_space_;
  CodeSpace* code_space_;
  MapSpace* map_space_;
  LargeObjectSpace* lo_space_;
  NewLargeObjectSpace* new_lo_space_;
  ReadOnlySpace* read_only_space_;
  // Map from the space id to the space.
  Space* space_[LAST_SPACE + 1];

  // Determines whether code space is write-protected. This is essentially a
  // race-free copy of the {FLAG_write_protect_code_memory} flag.
  bool write_protect_code_memory_;

  // Holds the number of open CodeSpaceMemoryModificationScopes.
  uintptr_t code_space_memory_modification_scope_depth_;

  HeapState gc_state_;
  int gc_post_processing_depth_;

  // Returns the amount of external memory registered since the last global GC.
  uint64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // Starts marking when stress_marking_percentage_% of the marking start limit
  // is reached.
  int stress_marking_percentage_;

  // Observer that causes more frequent checks for reached incremental marking
  // limit.
  AllocationObserver* stress_marking_observer_;

  // Observer that can cause early scavenge start.
  StressScavengeObserver* stress_scavenge_observer_;

  bool allocation_step_in_progress_;

  // The maximum percent of the marking limit reached without causing marking.
  // This is tracked when specifying --fuzzer-gc-analysis.
  double max_marking_limit_reached_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // The number of Mark-Compact garbage collections that are considered
  // ineffective. See the IsIneffectiveMarkCompact() predicate.
  int consecutive_ineffective_mark_compacts_;

  static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
  uintptr_t mmap_region_base_;

  // For post mortem debugging.
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Limit that triggers a global GC on the next (normally caused) GC.  This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  size_t old_generation_allocation_limit_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
  Object* native_contexts_list_;
  Object* allocation_sites_list_;

  std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
  std::vector<GCCallbackTuple> gc_prologue_callbacks_;

  GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  GCTracer* tracer_;

  size_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  size_t semi_space_copied_object_size_;
  size_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;

  // Last time a garbage collection happened.
  double last_gc_time_;

  MarkCompactCollector* mark_compact_collector_;
  MinorMarkCompactCollector* minor_mark_compact_collector_;

  ArrayBufferCollector* array_buffer_collector_;

  MemoryAllocator* memory_allocator_;

  StoreBuffer* store_buffer_;

  HeapController* heap_controller_;

  IncrementalMarking* incremental_marking_;
  ConcurrentMarking* concurrent_marking_;

  GCIdleTimeHandler* gc_idle_time_handler_;

  MemoryReducer* memory_reducer_;

  ObjectStats* live_object_stats_;
  ObjectStats* dead_object_stats_;

  ScavengeJob* scavenge_job_;
  base::Semaphore parallel_scavenge_semaphore_;

  AllocationObserver* idle_scavenge_observer_;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_at_last_gc_;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

  // The feedback storage is used to store allocation sites (keys) and how
  // often they have been visited (values) by finding a memento behind an
  // object. The storage is only alive temporarily during a GC. The invariant
  // is that all pointers in this map are already fixed, i.e., they do not
  // point to forwarding pointers.
  PretenuringFeedbackMap global_pretenuring_feedback_;

  char trace_ring_buffer_[kTraceRingBufferSize];

  // Used as boolean.
  uint8_t is_marking_flag_;

  // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_;
  size_t ring_buffer_end_;

  // Flag is set when the heap has been configured.  The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  ExternalStringTable external_string_table_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;

  StrongRootsList* strong_roots_list_;

  // The depth of HeapIterator nestings.
  int heap_iterator_depth_;

  LocalEmbedderHeapTracer* local_embedder_heap_tracer_;

  bool fast_promotion_mode_;

  // Used for testing purposes.
  bool force_oom_;
  bool delay_sweeper_tasks_for_testing_;

  HeapObject* pending_layout_change_object_;

  base::Mutex unprotected_memory_chunks_mutex_;
  std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
  bool unprotected_memory_chunks_registry_enabled_;

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
  int allocation_timeout_;
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

  std::map<HeapObject*, HeapObject*> retainer_;
  std::map<HeapObject*, Root> retaining_root_;
  // If an object is retained by an ephemeron, then the retaining key of the
  // ephemeron is stored in this map.
  std::map<HeapObject*, HeapObject*> ephemeron_retainer_;
  // For each index in the retaining_path_targets_ array this map
  // stores the option of the corresponding target.
  std::map<int, RetainingPathOption> retaining_path_target_option_;
  std::vector<HeapObjectAllocationTracker*> allocation_trackers_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class ConcurrentMarking;
  friend class EphemeronHashTableMarkingTask;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class MemoryController;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IncrementalMarkingJob;
  friend class LargeObjectSpace;
  template <FixedArrayVisitationMode fixed_array_mode,
            TraceRetainingPathMode retaining_path_mode, typename MarkingState>
  friend class MarkingVisitor;
  friend class MarkCompactCollector;
  friend class MarkCompactCollectorBase;
  friend class MinorMarkCompactCollector;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class PagedSpace;
  friend class Scavenger;
  friend class StoreBuffer;
  friend class Sweeper;
  friend class heap::TestMemoryAllocatorScope;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class heap::HeapTester;

  FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  intptr_t* start_marker;                  //  0
  size_t* ro_space_size;                   //  1
  size_t* ro_space_capacity;               //  2
  size_t* new_space_size;                  //  3
  size_t* new_space_capacity;              //  4
  size_t* old_space_size;                  //  5
  size_t* old_space_capacity;              //  6
  size_t* code_space_size;                 //  7
  size_t* code_space_capacity;             //  8
  size_t* map_space_size;                  //  9
  size_t* map_space_capacity;              // 10
  size_t* lo_space_size;                   // 11
  size_t* global_handle_count;             // 12
  size_t* weak_global_handle_count;        // 13
  size_t* pending_global_handle_count;     // 14
  size_t* near_death_global_handle_count;  // 15
  size_t* free_global_handle_count;        // 16
  size_t* memory_allocator_size;           // 17
  size_t* memory_allocator_capacity;       // 18
  size_t* malloced_memory;                 // 19
  size_t* malloced_peak_memory;            // 20
  size_t* objects_per_type;                // 21
  size_t* size_per_type;                   // 22
  int* os_error;                           // 23
  char* last_few_messages;                 // 24
  char* js_stacktrace;                     // 25
  intptr_t* end_marker;                    // 26
};


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};

// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
 public:
  explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
  inline ~CodeSpaceMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageCollectionMemoryModificationScope can only be used by the main
// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
// already active.
class CodePageCollectionMemoryModificationScope {
 public:
  explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
  inline ~CodePageCollectionMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
class CodePageMemoryModificationScope {
 public:
  explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
  inline ~CodePageMemoryModificationScope();

 private:
  MemoryChunk* chunk_;
  bool scope_active_;

  // Disallow any GCs inside this scope, as a relocation of the underlying
  // object would change the {MemoryChunk} that this scope targets.
  DisallowHeapAllocation no_heap_allocation_;
};

// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
 public:
  explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(HeapObject* host, Object** start, Object** end) override;
  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) override;
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override;

 protected:
  virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
                              MaybeObject** end);

  Heap* heap_;
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
 public:
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override;
};

// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space, code space and optionally read only space. Returns each
// space in turn, and null when it is done.
class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
 public:
  enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };

  explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
                                       SpacesSpecifier::kSweepablePagedSpaces)
      : heap_(heap),
        counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
                                                               : OLD_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
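
// Usage sketch (assumes a valid Heap* |heap|; illustrative only):
//   PagedSpaces spaces(heap);
//   for (PagedSpace* space = spaces.next(); space != nullptr;
//        space = spaces.next()) {
//     // Visit |space|.
//   }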


class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  Space* next();

 private:
  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
};
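
// Usage sketch (assumes a valid Heap* |heap|; illustrative only):
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     Space* space = it.next();
//     // Visit |space|.
//   }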


// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces, as
// each of them can iterate over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime
// (using an embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject* next();

 private:
  HeapObject* NextObject();

  DisallowHeapAllocation no_heap_allocation_;

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;
};
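
// Usage sketch (assumes a valid Heap* |heap|; illustrative only):
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != nullptr;
//        obj = iterator.next()) {
//     // Inspect |obj|; no allocation may happen while the iterator lives.
//   }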

// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If nullptr is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
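
// Illustrative sketch (hypothetical subclass, not part of this header): a
// retainer that keeps every object alive at its current address.
//
//   class KeepAllRetainer final : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override { return object; }
//   };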

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK_LE(kPointerSize, step_size);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space does an allocation step. This may
  // happen more frequently than the step_size we are monitoring (e.g. when
  // there are multiple observers, or when a page or space boundary is
  // encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size);

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized.) size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries.)
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class Space;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
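
// Illustrative sketch (hypothetical subclass, not part of this header): an
// observer whose Step() fires after roughly every megabyte of allocation.
//
//   class LoggingObserver final : public AllocationObserver {
//    public:
//     LoggingObserver() : AllocationObserver(1 * MB) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // React to ~1 MB of allocation; soon_object may be nullptr.
//     }
//   };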

V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);

// -----------------------------------------------------------------------------
// Allows observation of heap object allocations.
class HeapObjectAllocationTracker {
 public:
  virtual void AllocationEvent(Address addr, int size) = 0;
  virtual void MoveEvent(Address from, Address to, int size) {}
  virtual void UpdateObjectSizeEvent(Address addr, int size) {}
  virtual ~HeapObjectAllocationTracker() = default;
};
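
// Illustrative sketch (hypothetical subclass, not part of this header): a
// tracker that accumulates the total number of allocated bytes. It would be
// attached via Heap::AddHeapObjectAllocationTracker().
//
//   class CountingTracker final : public HeapObjectAllocationTracker {
//    public:
//     void AllocationEvent(Address addr, int size) override {
//       total_bytes_ += size;
//     }
//
//    private:
//     size_t total_bytes_ = 0;
//   };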

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_