// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/base/template-utils.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/objects/hash-table-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

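// Marking state used by the background marking tasks. It uses the atomic
// access mode of MarkingStateBase and accumulates live bytes per chunk in a
// task-local LiveBytesMap instead of updating the chunk counters directly;
// the map is flushed to the main marking state in FlushLiveBytes.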
class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
      : live_bytes_(live_bytes) {}

  Bitmap* bitmap(const MemoryChunk* chunk) {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*live_bytes_)[chunk] += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  LiveBytesMap* live_bytes_;
};

// Helper class for storing in-object slot addresses and values.
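// A snapshot is filled with relaxed atomic loads while the main thread may
// still be mutating the object; marking then works off the snapshot, so each
// slot is read only once.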
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  Object** slot(int i) const { return snapshot_[i].first; }
  Object* value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(Object** slot, Object* value) {
    snapshot_[number_of_slots_].first = slot;
    snapshot_[number_of_slots_].second = value;
    ++number_of_slots_;
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
  int number_of_slots_;
  std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

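// The visitor that performs the actual marking work on background tasks.
// Objects that can be handled concurrently are pushed onto the shared
// worklist; objects that need main-thread treatment (e.g. Code objects, Maps,
// JS API objects that require wrapper tracing) go to the bailout worklist.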
class ConcurrentMarkingVisitor final
    : public HeapVisitor<int, ConcurrentMarkingVisitor> {
 public:
  using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;

  explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
                                    ConcurrentMarking::MarkingWorklist* bailout,
                                    LiveBytesMap* live_bytes,
                                    WeakObjects* weak_objects, int task_id)
      : shared_(shared, task_id),
        bailout_(bailout, task_id),
        weak_objects_(weak_objects),
        marking_state_(live_bytes),
        task_id_(task_id) {}

  template <typename T>
  static V8_INLINE T* Cast(HeapObject* object) {
    return T::cast(object);
  }

  bool ShouldVisit(HeapObject* object) {
    return marking_state_.GreyToBlack(object);
  }

  bool AllowDefaultJSObjectVisit() { return false; }

  void ProcessStrongHeapObject(HeapObject* host, Object** slot,
                               HeapObject* heap_object) {
    MarkObject(heap_object);
    MarkCompactCollector::RecordSlot(host, slot, heap_object);
  }

  void ProcessWeakHeapObject(HeapObject* host, HeapObjectReference** slot,
                             HeapObject* heap_object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.IsBlackOrGrey(heap_object)) {
      // Weak references with live values are directly processed here to
      // reduce the processing time of weak cells during the main GC
      // pause.
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    } else {
      // If we do not know about liveness of the value, we have to process
      // the reference when we know the liveness of the whole transitive
      // closure.
      weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
    }
  }

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    for (Object** slot = start; slot < end; slot++) {
      Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (object->IsHeapObject()) {
        ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
      }
    }
  }

  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) override {
    for (MaybeObject** slot = start; slot < end; slot++) {
      MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
      HeapObject* heap_object;
      if (object->ToStrongHeapObject(&heap_object)) {
        // If the reference changes concurrently from strong to weak, the write
        // barrier will treat the weak reference as strong, so we won't miss the
        // weak reference.
        ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
                                heap_object);
      } else if (object->ToWeakHeapObject(&heap_object)) {
        ProcessWeakHeapObject(
            host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
      }
    }
  }

  void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      Object** slot = snapshot.slot(i);
      Object* object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object->IsHeapObject()) continue;
      HeapObject* heap_object = HeapObject::cast(object);
      MarkObject(heap_object);
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    }
  }

  // ===========================================================================
  // JS object =================================================================
  // ===========================================================================

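  // JS objects are visited via a slot snapshot (see VisitJSObjectSubclass),
  // because the main thread can mutate their in-object fields concurrently.
  // Only the used part of the instance (map->UsedInstanceSize()) is
  // snapshotted.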
  int VisitJSObject(Map* map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map* map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSApiObject(Map* map, JSObject* object) {
    if (marking_state_.IsGrey(object)) {
      // The main thread will do wrapper tracing in Blink.
      bailout_.Push(object);
    }
    return 0;
  }

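  // JSFunction is handled like the other JS object subclasses, except that
  // the snapshot is built with JSFunction::BodyDescriptorWeak instead of the
  // regular body descriptor.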
  int VisitJSFunction(Map* map, JSFunction* object) {
    int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::kHeaderSize);
    const SlotSnapshot& snapshot = MakeSlotSnapshotWeak(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  // ===========================================================================
  // Strings with pointers =====================================================
  // ===========================================================================

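  // Cons, sliced, and thin strings are also marked from a snapshot, because
  // the main thread can concurrently change their map (e.g. to a thin or
  // external string) and rewrite their fields.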
  int VisitConsString(Map* map, ConsString* object) {
    int size = ConsString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  int VisitSlicedString(Map* map, SlicedString* object) {
    int size = SlicedString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  int VisitThinString(Map* map, ThinString* object) {
    int size = ThinString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  // ===========================================================================
  // Strings without pointers ==================================================
  // ===========================================================================

  int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
    int size = SeqOneByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
    int size = SeqTwoByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  // ===========================================================================
  // Fixed array object ========================================================
  // ===========================================================================

  int VisitFixedArray(Map* map, FixedArray* object) {
    return VisitLeftTrimmableArray(map, object);
  }

  int VisitFixedDoubleArray(Map* map, FixedDoubleArray* object) {
    return VisitLeftTrimmableArray(map, object);
  }

  // ===========================================================================
  // Code object ===============================================================
  // ===========================================================================

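  // Code objects are never marked concurrently; they are always pushed onto
  // the bailout worklist and processed by the main thread.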
  int VisitCode(Map* map, Code* object) {
    bailout_.Push(object);
    return 0;
  }

  // ===========================================================================
  // Objects with weak fields and/or side-effectiful visitation.
  // ===========================================================================

  int VisitBytecodeArray(Map* map, BytecodeArray* object) {
    if (!ShouldVisit(object)) return 0;
    int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    BytecodeArray::BodyDescriptorWeak::IterateBody(map, object, size, this);
    object->MakeOlder();
    return size;
  }

  int VisitAllocationSite(Map* map, AllocationSite* object) {
    if (!ShouldVisit(object)) return 0;
    int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
    return size;
  }

  int VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
    if (!ShouldVisit(object)) return 0;
    int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
    return size;
  }

  int VisitMap(Map* meta_map, Map* map) {
    if (marking_state_.IsGrey(map)) {
      // Maps have ad-hoc weakness for descriptor arrays. They also clear the
      // code-cache. Conservatively visit strong fields skipping the
      // descriptor array field and the code cache field.
      VisitMapPointer(map, map->map_slot());
      VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
      VisitPointer(
          map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
      VisitPointer(map, HeapObject::RawMaybeWeakField(
                            map, Map::kTransitionsOrPrototypeInfoOffset));
      VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
      bailout_.Push(map);
    }
    return 0;
  }

  int VisitNativeContext(Map* map, Context* object) {
    if (!ShouldVisit(object)) return 0;
    int size = Context::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    Context::BodyDescriptorWeak::IterateBody(map, object, size, this);
    return size;
  }

  int VisitTransitionArray(Map* map, TransitionArray* array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
    TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
    weak_objects_->transition_arrays.Push(task_id_, array);
    return size;
  }

  int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
    return VisitJSObjectSubclass(map, object);
  }

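  // Ephemeron hash tables (the backing stores of JSWeakMap/JSWeakSet) get
  // special treatment: keys are never marked through the table, values are
  // visited only if their key is already marked, and entries whose key and
  // value are both still white are pushed onto discovered_ephemerons so they
  // can be revisited at the end of concurrent marking.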
  int VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
    if (!ShouldVisit(table)) return 0;
    weak_objects_->ephemeron_hash_tables.Push(task_id_, table);

    for (int i = 0; i < table->Capacity(); i++) {
      Object** key_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
      HeapObject* key = HeapObject::cast(table->KeyAt(i));
      MarkCompactCollector::RecordSlot(table, key_slot, key);

      Object** value_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));

      if (marking_state_.IsBlackOrGrey(key)) {
        VisitPointer(table, value_slot);

      } else {
        Object* value_obj = table->ValueAt(i);

        if (value_obj->IsHeapObject()) {
          HeapObject* value = HeapObject::cast(value_obj);
          MarkCompactCollector::RecordSlot(table, value_slot, value);

          // Revisit ephemerons with both key and value unreachable at end
          // of concurrent marking cycle.
          if (marking_state_.IsWhite(value)) {
            weak_objects_->discovered_ephemerons.Push(task_id_,
                                                      Ephemeron{key, value});
          }
        }
      }
    }

    return table->SizeFromMap(map);
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool VisitEphemeron(HeapObject* key, HeapObject* value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        shared_.Push(value);
        return true;
      }

    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }

    return false;
  }

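  // Marks the object grey (if it was white) and pushes it onto the shared
  // worklist so that its body is visited later.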
  void MarkObject(HeapObject* object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject* host, Object** start,
                       Object** end) override {
      for (Object** p = start; p < end; p++) {
        Object* object = reinterpret_cast<Object*>(
            base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
        slot_snapshot_->add(p, object);
      }
    }

    void VisitPointers(HeapObject* host, MaybeObject** start,
                       MaybeObject** end) override {
      // This should never happen, because we don't use snapshotting for objects
      // which contain weak references.
      UNREACHABLE();
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclass(Map* map, T* object) {
    int size = T::BodyDescriptor::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, T::kHeaderSize);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map* map, T* object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object* length = object->unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length->IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object, object->map_slot());
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

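  // Builds a snapshot of the object's map slot and body slots using relaxed
  // atomic loads. MakeSlotSnapshotWeak is identical except that it iterates
  // the body with the type's BodyDescriptorWeak.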
  template <typename T>
  const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object,
                         reinterpret_cast<Object**>(object->map_slot()));
    T::BodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

  template <typename T>
  const SlotSnapshot& MakeSlotSnapshotWeak(Map* map, T* object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object,
                         reinterpret_cast<Object**>(object->map_slot()));
    T::BodyDescriptorWeak::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }
  ConcurrentMarking::MarkingWorklist::View shared_;
  ConcurrentMarking::MarkingWorklist::View bailout_;
  WeakObjects* weak_objects_;
  ConcurrentMarkingState marking_state_;
  int task_id_;
  SlotSnapshot slot_snapshot_;
};

// Strings can change maps due to conversion to thin string or external strings.
// Use reinterpret cast to avoid data race in slow dchecks.
template <>
ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<ConsString*>(object);
}

template <>
SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SlicedString*>(object);
}

template <>
ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<ThinString*>(object);
}

template <>
SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SeqOneByteString*>(object);
}

template <>
SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SeqTwoByteString*>(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<FixedArray*>(object);
}

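// A cancelable background task; each task calls ConcurrentMarking::Run with
// its own task id and task state.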
class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  virtual ~Task() {}

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* bailout,
                                     MarkingWorklist* on_hold,
                                     WeakObjects* weak_objects)
    : heap_(heap),
      shared_(shared),
      bailout_(bailout),
      on_hold_(on_hold),
      weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking);
#endif
}

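// Body of a single marking task: processes ephemerons handed over by the main
// thread, then drains the shared worklist in small batches, checking for a
// preemption request between batches. Objects in the new-space allocation
// area are deferred to the on_hold worklist. Finally, all local worklists and
// counters are flushed back to their global pools.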
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
                                   weak_objects_, task_id);
  double time_ms;
  size_t marked_bytes = 0;
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Starting concurrent marking task %d\n", task_id);
  }
  bool ephemeron_marked = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject* object;
        if (!shared_->Pop(task_id, &object)) {
          done = true;
          break;
        }
        objects_processed++;
        Address new_space_top = heap_->new_space()->original_top();
        Address new_space_limit = heap_->new_space()->original_limit();
        Address addr = object->address();
        if (new_space_top <= addr && addr < new_space_limit) {
          on_hold_->Push(task_id, object);
        } else {
          Map* map = object->synchronized_map();
          current_marked_bytes += visitor.Visit(map, object);
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    shared_->FlushToGlobal(task_id);
    bailout_->FlushToGlobal(task_id);
    on_hold_->FlushToGlobal(task_id);

    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }

    {
      base::LockGuard<base::Mutex> guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

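// Starts up to kMaxTasks background tasks; task id 0 is reserved for the main
// thread. The number of tasks is derived from the number of worker threads
// and is computed only once, on the first call.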
void ConcurrentMarking::ScheduleTasks() {
  DCHECK(!heap_->IsTearingDown());
  if (!FLAG_concurrent_marking) return;
  base::LockGuard<base::Mutex> guard(&pending_lock_);
  DCHECK_EQ(0, pending_task_count_);
  if (task_count_ == 0) {
    static const int num_cores =
        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
    // Mac OSX 10.11 and prior seem to have trouble when doing concurrent
    // marking on competing hyper-threads (regresses Octane/Splay). As such,
    // only use num_cores/2, leaving one of those for the main thread.
    // TODO(ulan): Use all cores on Mac 10.12+.
    task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else   // defined(V8_OS_MACOSX)
    // On other platforms use all logical cores, leaving one for the main
    // thread.
    task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif  // defined(V8_OS_MACOSX)
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request = false;
      is_pending_[i] = true;
      ++pending_task_count_;
      auto task =
          base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    }
  }
  DCHECK_EQ(task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
  {
    base::LockGuard<base::Mutex> guard(&pending_lock_);
    if (pending_task_count_ > 0) return;
  }
  if (!shared_->IsGlobalPoolEmpty() ||
      !weak_objects_->current_ephemerons.IsEmpty() ||
      !weak_objects_->discovered_ephemerons.IsEmpty()) {
    ScheduleTasks();
  }
}

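// Blocks until no marking task is pending. Depending on the stop request,
// not-yet-started tasks are aborted and running tasks may be asked to
// preempt; COMPLETE_TASKS_FOR_TESTING lets every task run to completion.
// Returns false if concurrent marking is disabled or no tasks were pending.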
bool ConcurrentMarking::Stop(StopRequest stop_request) {
  if (!FLAG_concurrent_marking) return false;
  base::LockGuard<base::Mutex> guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            CancelableTaskManager::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request = true;
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  base::LockGuard<base::Mutex> guard(&pending_lock_);
  return pending_task_count_ == 0;
}

void ConcurrentMarking::FlushLiveBytes(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= task_count_; i++) {
    LiveBytesMap& live_bytes = task_state_[i].live_bytes;
    for (auto pair : live_bytes) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      if (pair.second != 0) {
        marking_state->IncrementLiveBytes(pair.first, pair.second);
      }
    }
    live_bytes.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
  for (int i = 1; i <= task_count_; i++) {
    if (task_state_[i].live_bytes.count(chunk)) {
      task_state_[i].live_bytes[chunk] = 0;
    }
  }
}

size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= task_count_; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

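// Stops concurrent marking (preempting running tasks) for the lifetime of the
// scope and reschedules the tasks on destruction if any were actually
// stopped.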
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(concurrent_marking_->Stop(
          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8