1 // Copyright 2018 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/execution/microtask-queue.h"
6
7 #include <algorithm>
8 #include <cstddef>
9
10 #include "src/api/api-inl.h"
11 #include "src/base/logging.h"
12 #include "src/execution/isolate.h"
13 #include "src/handles/handles-inl.h"
14 #include "src/objects/microtask-inl.h"
15 #include "src/objects/visitors.h"
16 #include "src/roots/roots-inl.h"
17 #include "src/tracing/trace-event.h"
18
19 namespace v8 {
20 namespace internal {
21
// Byte offsets of the queue's bookkeeping fields within MicrotaskQueue.
// Exposed as named constants so code outside this translation unit can
// read the fields directly via raw offsets (presumably generated/builtin
// code; see the power-of-two capacity note in EnqueueMicrotask — TODO
// confirm the exact consumers).
const size_t MicrotaskQueue::kRingBufferOffset =
    OFFSET_OF(MicrotaskQueue, ring_buffer_);
const size_t MicrotaskQueue::kCapacityOffset =
    OFFSET_OF(MicrotaskQueue, capacity_);
const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);
const size_t MicrotaskQueue::kFinishedMicrotaskCountOffset =
    OFFSET_OF(MicrotaskQueue, finished_microtask_count_);

// Smallest ring-buffer capacity; ResizeBuffer never shrinks below this and
// EnqueueMicrotask starts growth from it. Kept a power of two (see the
// modulo comment in EnqueueMicrotask).
const intptr_t MicrotaskQueue::kMinimumCapacity = 8;
32
33 // static
SetUpDefaultMicrotaskQueue(Isolate * isolate)34 void MicrotaskQueue::SetUpDefaultMicrotaskQueue(Isolate* isolate) {
35 DCHECK_NULL(isolate->default_microtask_queue());
36
37 MicrotaskQueue* microtask_queue = new MicrotaskQueue;
38 microtask_queue->next_ = microtask_queue;
39 microtask_queue->prev_ = microtask_queue;
40 isolate->set_default_microtask_queue(microtask_queue);
41 }
42
43 // static
New(Isolate * isolate)44 std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate) {
45 DCHECK_NOT_NULL(isolate->default_microtask_queue());
46
47 std::unique_ptr<MicrotaskQueue> microtask_queue(new MicrotaskQueue);
48
49 // Insert the new instance to the next of last MicrotaskQueue instance.
50 MicrotaskQueue* last = isolate->default_microtask_queue()->prev_;
51 microtask_queue->next_ = last->next_;
52 microtask_queue->prev_ = last;
53 last->next_->prev_ = microtask_queue.get();
54 last->next_ = microtask_queue.get();
55
56 return microtask_queue;
57 }
58
59 MicrotaskQueue::MicrotaskQueue() = default;
60
MicrotaskQueue::~MicrotaskQueue() {
  // Unlink this queue from the circular doubly-linked list of queues,
  // unless it is the only element (next_ == this), in which case there is
  // nothing to unlink.
  if (next_ != this) {
    DCHECK_NE(prev_, this);
    next_->prev_ = prev_;
    prev_->next_ = next_;
  }
  // Release the ring buffer allocated by ResizeBuffer (nullptr-safe).
  delete[] ring_buffer_;
}
69
70 // static
CallEnqueueMicrotask(Isolate * isolate,intptr_t microtask_queue_pointer,Address raw_microtask)71 Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
72 intptr_t microtask_queue_pointer,
73 Address raw_microtask) {
74 Microtask microtask = Microtask::cast(Object(raw_microtask));
75 reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer)
76 ->EnqueueMicrotask(microtask);
77 return Smi::zero().ptr();
78 }
79
EnqueueMicrotask(v8::Isolate * v8_isolate,v8::Local<Function> function)80 void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
81 v8::Local<Function> function) {
82 Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
83 HandleScope scope(isolate);
84 Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
85 Utils::OpenHandle(*function), isolate->native_context());
86 EnqueueMicrotask(*microtask);
87 }
88
EnqueueMicrotask(v8::Isolate * v8_isolate,v8::MicrotaskCallback callback,void * data)89 void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
90 v8::MicrotaskCallback callback,
91 void* data) {
92 Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
93 HandleScope scope(isolate);
94 Handle<CallbackTask> microtask = isolate->factory()->NewCallbackTask(
95 isolate->factory()->NewForeign(reinterpret_cast<Address>(callback)),
96 isolate->factory()->NewForeign(reinterpret_cast<Address>(data)));
97 EnqueueMicrotask(*microtask);
98 }
99
EnqueueMicrotask(Microtask microtask)100 void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
101 if (size_ == capacity_) {
102 // Keep the capacity of |ring_buffer_| power of 2, so that the JIT
103 // implementation can calculate the modulo easily.
104 intptr_t new_capacity = std::max(kMinimumCapacity, capacity_ << 1);
105 ResizeBuffer(new_capacity);
106 }
107
108 DCHECK_LT(size_, capacity_);
109 ring_buffer_[(start_ + size_) % capacity_] = microtask.ptr();
110 ++size_;
111 }
112
PerformCheckpointInternal(v8::Isolate * v8_isolate)113 void MicrotaskQueue::PerformCheckpointInternal(v8::Isolate* v8_isolate) {
114 DCHECK(ShouldPerfomCheckpoint());
115 std::unique_ptr<MicrotasksScope> microtasks_scope;
116 if (microtasks_policy_ == v8::MicrotasksPolicy::kScoped) {
117 // If we're using microtask scopes to schedule microtask execution, V8
118 // API calls will check that there's always a microtask scope on the
119 // stack. As the microtasks we're about to execute could invoke embedder
120 // callbacks which then calls back into V8, we create an artificial
121 // microtask scope here to avoid running into the CallDepthScope check.
122 microtasks_scope.reset(new v8::MicrotasksScope(
123 v8_isolate, this, v8::MicrotasksScope::kDoNotRunMicrotasks));
124 }
125 Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
126 RunMicrotasks(isolate);
127 isolate->ClearKeptObjects();
128 }
129
130 namespace {
131
132 class SetIsRunningMicrotasks {
133 public:
SetIsRunningMicrotasks(bool * flag)134 explicit SetIsRunningMicrotasks(bool* flag) : flag_(flag) {
135 DCHECK(!*flag_);
136 *flag_ = true;
137 }
138
~SetIsRunningMicrotasks()139 ~SetIsRunningMicrotasks() {
140 DCHECK(*flag_);
141 *flag_ = false;
142 }
143
144 private:
145 bool* flag_;
146 };
147
148 } // namespace
149
// Drains the queue, returning the number of microtasks that finished, or
// -1 if execution was terminated mid-run. Completion callbacks fire on
// every path, including the empty-queue fast path.
int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
  if (!size()) {
    OnCompleted(isolate);
    return 0;
  }

  // Snapshot the running total so the delta after the run gives the number
  // of tasks processed by this call alone.
  intptr_t base_count = finished_microtask_count_;

  HandleScope handle_scope(isolate);
  MaybeHandle<Object> maybe_exception;

  MaybeHandle<Object> maybe_result;

  int processed_microtask_count;
  {
    // Flag the queue as running and suppress reentrant microtask execution
    // triggered from within the tasks themselves.
    SetIsRunningMicrotasks scope(&is_running_microtasks_);
    v8::Isolate::SuppressMicrotaskExecutionScope suppress(
        reinterpret_cast<v8::Isolate*>(isolate));
    HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
        isolate->handle_scope_implementer());
    TRACE_EVENT_BEGIN0("v8.execute", "RunMicrotasks");
    {
      TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
      maybe_result = Execution::TryRunMicrotasks(isolate, this,
                                                 &maybe_exception);
      processed_microtask_count =
          static_cast<int>(finished_microtask_count_ - base_count);
    }
    TRACE_EVENT_END1("v8.execute", "RunMicrotasks", "microtask_count",
                     processed_microtask_count);
  }

  // If execution is terminating, clean up and propagate that to TryCatch scope.
  if (maybe_result.is_null() && maybe_exception.is_null()) {
    // Drop all remaining tasks by resetting the ring buffer to its
    // initial, empty state.
    delete[] ring_buffer_;
    ring_buffer_ = nullptr;
    capacity_ = 0;
    size_ = 0;
    start_ = 0;
    DCHECK(isolate->has_scheduled_exception());
    isolate->OnTerminationDuringRunMicrotasks();
    OnCompleted(isolate);
    return -1;
  }
  // A successful run leaves the queue fully drained.
  DCHECK_EQ(0, size());
  OnCompleted(isolate);

  return processed_microtask_count;
}
199
// GC support: reports all pending microtask slots to |visitor| as strong
// roots, then opportunistically shrinks an oversized ring buffer.
void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
  if (size_) {
    // Iterate pending Microtasks as root objects to avoid the write barrier for
    // all single Microtask. If this hurts the GC performance, use a FixedArray.
    //
    // The ring may wrap: first visit [start_, min(start_ + size_, capacity_)),
    // then the wrapped-around prefix [0, max(start_ + size_ - capacity_, 0))
    // — the second range is empty when no wrap occurred.
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_ + start_),
        FullObjectSlot(ring_buffer_ + std::min(start_ + size_, capacity_)));
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_),
        FullObjectSlot(ring_buffer_ + std::max(start_ + size_ - capacity_,
                                               static_cast<intptr_t>(0))));
  }

  if (capacity_ <= kMinimumCapacity) {
    return;
  }

  // Shrink heuristic: halve the capacity while it exceeds twice the number
  // of queued tasks, clamped to kMinimumCapacity, and reallocate only if
  // that actually reduced it.
  intptr_t new_capacity = capacity_;
  while (new_capacity > 2 * size_) {
    new_capacity >>= 1;
  }
  new_capacity = std::max(new_capacity, kMinimumCapacity);
  if (new_capacity < capacity_) {
    ResizeBuffer(new_capacity);
  }
}
226
AddMicrotasksCompletedCallback(MicrotasksCompletedCallbackWithData callback,void * data)227 void MicrotaskQueue::AddMicrotasksCompletedCallback(
228 MicrotasksCompletedCallbackWithData callback, void* data) {
229 CallbackWithData callback_with_data(callback, data);
230 auto pos =
231 std::find(microtasks_completed_callbacks_.begin(),
232 microtasks_completed_callbacks_.end(), callback_with_data);
233 if (pos != microtasks_completed_callbacks_.end()) return;
234 microtasks_completed_callbacks_.push_back(callback_with_data);
235 }
236
RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallbackWithData callback,void * data)237 void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
238 MicrotasksCompletedCallbackWithData callback, void* data) {
239 CallbackWithData callback_with_data(callback, data);
240 auto pos =
241 std::find(microtasks_completed_callbacks_.begin(),
242 microtasks_completed_callbacks_.end(), callback_with_data);
243 if (pos == microtasks_completed_callbacks_.end()) return;
244 microtasks_completed_callbacks_.erase(pos);
245 }
246
OnCompleted(Isolate * isolate) const247 void MicrotaskQueue::OnCompleted(Isolate* isolate) const {
248 std::vector<CallbackWithData> callbacks(microtasks_completed_callbacks_);
249 for (auto& callback : callbacks) {
250 callback.first(reinterpret_cast<v8::Isolate*>(isolate), callback.second);
251 }
252 }
253
get(intptr_t index) const254 Microtask MicrotaskQueue::get(intptr_t index) const {
255 DCHECK_LT(index, size_);
256 Object microtask(ring_buffer_[(index + start_) % capacity_]);
257 return Microtask::cast(microtask);
258 }
259
ResizeBuffer(intptr_t new_capacity)260 void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) {
261 DCHECK_LE(size_, new_capacity);
262 Address* new_ring_buffer = new Address[new_capacity];
263 for (intptr_t i = 0; i < size_; ++i) {
264 new_ring_buffer[i] = ring_buffer_[(start_ + i) % capacity_];
265 }
266
267 delete[] ring_buffer_;
268 ring_buffer_ = new_ring_buffer;
269 capacity_ = new_capacity;
270 start_ = 0;
271 }
272
273 } // namespace internal
274 } // namespace v8
275