1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_CONCURRENT_MARKING_H_
6 #define V8_HEAP_CONCURRENT_MARKING_H_
7 
8 #include <memory>
9 
10 #include "include/v8-platform.h"
11 #include "src/base/atomic-utils.h"
12 #include "src/base/platform/condition-variable.h"
13 #include "src/base/platform/mutex.h"
14 #include "src/heap/marking-visitor.h"
15 #include "src/heap/marking-worklist.h"
16 #include "src/heap/memory-measurement.h"
17 #include "src/heap/slot-set.h"
18 #include "src/heap/spaces.h"
19 #include "src/init/v8.h"
20 #include "src/tasks/cancelable-task.h"
21 #include "src/utils/allocation.h"
22 #include "src/utils/utils.h"
23 
24 namespace v8 {
25 namespace internal {
26 
27 class Heap;
28 class Isolate;
29 class MajorNonAtomicMarkingState;
30 class MemoryChunk;
31 class WeakObjects;
32 
// Per-chunk data accumulated by a concurrent marking task and later flushed
// to the main thread (see ConcurrentMarking::FlushMemoryChunkData).
struct MemoryChunkData {
  // Number of live bytes this task attributed to the chunk.
  intptr_t live_bytes;
  // Typed slots recorded for the chunk; presumably allocated lazily, only
  // when a typed slot is actually encountered — confirm against the .cc file.
  std::unique_ptr<TypedSlots> typed_slots;
};
37 
// Maps each MemoryChunk to the marking data a single task collected for it.
using MemoryChunkDataMap =
    std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
40 
class V8_EXPORT_PRIVATE ConcurrentMarking {
 public:
  // When the scope is entered, the concurrent marking tasks
  // are preempted and are not looking at the heap objects, concurrent marking
  // is resumed when the scope is exited.
  class V8_NODISCARD PauseScope {
   public:
    explicit PauseScope(ConcurrentMarking* concurrent_marking);
    ~PauseScope();

   private:
    ConcurrentMarking* const concurrent_marking_;
    // Whether marking was running when the scope was entered and therefore
    // must be resumed when the scope exits.
    const bool resume_on_exit_;
  };

  // TODO(gab): The only thing that prevents this being above 7 is
  // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
  // task 0, reserved for the main thread).
  static constexpr int kMaxTasks = 7;

  ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
                    WeakObjects* weak_objects);

  // Schedules asynchronous job to perform concurrent marking at |priority|.
  // Objects in the heap should not be moved while these are active (can be
  // stopped safely via Stop() or PauseScope).
  void ScheduleJob(TaskPriority priority = TaskPriority::kUserVisible);

  // Waits for scheduled job to complete.
  void Join();
  // Preempts ongoing job ASAP. Returns true if concurrent marking was in
  // progress, false otherwise.
  bool Pause();

  // Schedules asynchronous job to perform concurrent marking at |priority| if
  // not already running, otherwise adjusts the number of workers running job
  // and the priority if different from the default kUserVisible.
  void RescheduleJobIfNeeded(
      TaskPriority priority = TaskPriority::kUserVisible);
  // Flushes native context sizes to the given table of the main thread.
  void FlushNativeContexts(NativeContextStats* main_stats);
  // Flushes memory chunk data using the given marking state.
  void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
  // This function is called for a new space page that was cleared after
  // scavenge and is going to be re-used.
  void ClearMemoryChunkData(MemoryChunk* chunk);

  // Checks if all threads are stopped.
  bool IsStopped();

  // Returns the total number of bytes marked so far across all tasks.
  size_t TotalMarkedBytes();

  // Sets the flag indicating that another ephemeron-marking iteration is
  // required; semantics of when this is set are defined by the callers
  // (see the marking implementation in the .cc file).
  void set_another_ephemeron_iteration(bool another_ephemeron_iteration) {
    another_ephemeron_iteration_.store(another_ephemeron_iteration);
  }
  // Reads the flag set via set_another_ephemeron_iteration().
  bool another_ephemeron_iteration() {
    return another_ephemeron_iteration_.load();
  }

 private:
  // Per-worker bookkeeping; one entry per task (see task_state_ below).
  struct TaskState {
    size_t marked_bytes = 0;
    MemoryChunkDataMap memory_chunk_data;
    NativeContextInferrer native_context_inferrer;
    NativeContextStats native_context_stats;
    // Trailing padding, presumably to reduce false sharing between adjacent
    // TaskState entries in task_state_. NOTE(review): this assumes 64-byte
    // cache lines and pads but does not align the struct — confirm intent.
    char cache_line_padding[64];
  };
  class JobTask;
  // Worker entry point executed by JobTask on each participating thread.
  void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
           unsigned mark_compact_epoch, bool should_keep_ages_unchanged);
  size_t GetMaxConcurrency(size_t worker_count);

  std::unique_ptr<JobHandle> job_handle_;
  Heap* const heap_;
  MarkingWorklists* const marking_worklists_;
  WeakObjects* const weak_objects_;
  // kMaxTasks + 1 entries because task id 0 is reserved for the main thread
  // (see the TODO on kMaxTasks above); workers use ids 1..kMaxTasks.
  TaskState task_state_[kMaxTasks + 1];
  std::atomic<size_t> total_marked_bytes_{0};
  std::atomic<bool> another_ephemeron_iteration_{false};
};
121 
122 }  // namespace internal
123 }  // namespace v8
124 
125 #endif  // V8_HEAP_CONCURRENT_MARKING_H_
126