1 // Copyright 2019 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/execution/stack-guard.h"
6
7 #include "src/baseline/baseline-batch-compiler.h"
8 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
9 #include "src/execution/interrupts-scope.h"
10 #include "src/execution/isolate.h"
11 #include "src/execution/simulator.h"
12 #include "src/logging/counters.h"
13 #include "src/objects/backing-store.h"
14 #include "src/roots/roots-inl.h"
15 #include "src/tracing/trace-event.h"
16 #include "src/utils/memcopy.h"
17
18 #ifdef V8_ENABLE_MAGLEV
19 #include "src/maglev/maglev-concurrent-dispatcher.h"
20 #endif // V8_ENABLE_MAGLEV
21
22 #if V8_ENABLE_WEBASSEMBLY
23 #include "src/wasm/wasm-engine.h"
24 #endif // V8_ENABLE_WEBASSEMBLY
25
26 namespace v8 {
27 namespace internal {
28
// Installs the special kInterruptLimit sentinel as both the JS and C++ stack
// limits so the next stack check fails and execution is diverted into
// interrupt handling. The ExecutionAccess argument is evidence that the
// caller holds the execution lock.
void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(kInterruptLimit);
  thread_local_.set_climit(kInterruptLimit);
}
34
// Restores the active JS and C++ stack limits to the real (non-interrupt)
// limits, undoing set_interrupt_limits. The ExecutionAccess argument is
// evidence that the caller holds the execution lock.
void StackGuard::reset_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(thread_local_.real_jslimit_);
  thread_local_.set_climit(thread_local_.real_climit_);
}
40
// Sets a new C++ stack limit (and the derived, possibly simulator-adjusted JS
// limit). The active limits are only updated when they currently equal the
// real limits; a special interrupt limit stays in place.
void StackGuard::SetStackLimit(uintptr_t limit) {
  ExecutionAccess access(isolate_);
  // If the current limits are special (e.g. due to a pending interrupt) then
  // leave them alone.
  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
  }
  if (thread_local_.climit() == thread_local_.real_climit_) {
    thread_local_.set_climit(limit);
  }
  // Update the real limits last: the comparisons above must observe the old
  // real limits to detect whether a special limit is installed.
  thread_local_.real_climit_ = limit;
  thread_local_.real_jslimit_ = jslimit;
}
55
AdjustStackLimitForSimulator()56 void StackGuard::AdjustStackLimitForSimulator() {
57 ExecutionAccess access(isolate_);
58 uintptr_t climit = thread_local_.real_climit_;
59 // If the current limits are special (e.g. due to a pending interrupt) then
60 // leave them alone.
61 uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
62 if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
63 thread_local_.set_jslimit(jslimit);
64 }
65 }
66
EnableInterrupts()67 void StackGuard::EnableInterrupts() {
68 ExecutionAccess access(isolate_);
69 if (has_pending_interrupts(access)) {
70 set_interrupt_limits(access);
71 }
72 }
73
// Disables interrupt dispatch by restoring the real stack limits; pending
// interrupt flags themselves are kept and can be re-armed by
// EnableInterrupts.
void StackGuard::DisableInterrupts() {
  ExecutionAccess access(isolate_);
  reset_limits(access);
}
78
PushInterruptsScope(InterruptsScope * scope)79 void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
80 ExecutionAccess access(isolate_);
81 DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
82 if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
83 // Intercept already requested interrupts.
84 intptr_t intercepted =
85 thread_local_.interrupt_flags_ & scope->intercept_mask_;
86 scope->intercepted_flags_ = intercepted;
87 thread_local_.interrupt_flags_ &= ~intercepted;
88 } else {
89 DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
90 // Restore postponed interrupts.
91 int restored_flags = 0;
92 for (InterruptsScope* current = thread_local_.interrupt_scopes_;
93 current != nullptr; current = current->prev_) {
94 restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
95 current->intercepted_flags_ &= ~scope->intercept_mask_;
96 }
97 thread_local_.interrupt_flags_ |= restored_flags;
98
99 if (has_pending_interrupts(access)) set_interrupt_limits(access);
100 }
101 if (!has_pending_interrupts(access)) reset_limits(access);
102 // Add scope to the chain.
103 scope->prev_ = thread_local_.interrupt_scopes_;
104 thread_local_.interrupt_scopes_ = scope;
105 }
106
// Removes the top InterruptsScope from the chain.
// - kPostponeInterrupts: the flags it intercepted become pending again.
// - kRunInterrupts: pending flags that the next outer scope intercepts are
//   handed to that scope instead of staying pending.
void StackGuard::PopInterruptsScope() {
  ExecutionAccess access(isolate_);
  InterruptsScope* top = thread_local_.interrupt_scopes_;
  DCHECK_NE(top->mode_, InterruptsScope::kNoop);
  if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Make intercepted interrupts active.
    DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
    thread_local_.interrupt_flags_ |= top->intercepted_flags_;
  } else {
    DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
    // Postpone existing interrupts if needed: walk every interrupt bit and
    // offer it to the enclosing scope for interception.
    if (top->prev_) {
      for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
           interrupt = interrupt << 1) {
        InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
        if ((thread_local_.interrupt_flags_ & flag) &&
            top->prev_->Intercept(flag)) {
          thread_local_.interrupt_flags_ &= ~flag;
        }
      }
    }
  }
  // Re-arm the special limits if anything is (still) pending.
  if (has_pending_interrupts(access)) set_interrupt_limits(access);
  // Remove scope from chain.
  thread_local_.interrupt_scopes_ = top->prev_;
}
133
CheckInterrupt(InterruptFlag flag)134 bool StackGuard::CheckInterrupt(InterruptFlag flag) {
135 ExecutionAccess access(isolate_);
136 return (thread_local_.interrupt_flags_ & flag) != 0;
137 }
138
// Requests |flag| to be serviced at the next stack check. If the innermost
// InterruptsScope intercepts the flag, it is parked there instead of
// becoming pending now.
void StackGuard::RequestInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Check the chain of InterruptsScope for interception.
  if (thread_local_.interrupt_scopes_ &&
      thread_local_.interrupt_scopes_->Intercept(flag)) {
    return;
  }

  // Not intercepted. Set as active interrupt flag and arm the special limits
  // so the next stack check dispatches to HandleInterrupts.
  thread_local_.interrupt_flags_ |= flag;
  set_interrupt_limits(access);

  // If this isolate is waiting in a futex, notify it to wake up.
  isolate_->futex_wait_list_node()->NotifyWake();
}
154
// Withdraws a previously requested |flag| everywhere: from every
// InterruptsScope that intercepted it and from the active pending flags.
void StackGuard::ClearInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Clear the interrupt flag from the chain of InterruptsScope.
  for (InterruptsScope* current = thread_local_.interrupt_scopes_;
       current != nullptr; current = current->prev_) {
    current->intercepted_flags_ &= ~flag;
  }

  // Clear the interrupt flag from the active interrupt flags, and restore
  // the real limits if nothing remains pending.
  thread_local_.interrupt_flags_ &= ~flag;
  if (!has_pending_interrupts(access)) reset_limits(access);
}
167
HasTerminationRequest()168 bool StackGuard::HasTerminationRequest() {
169 ExecutionAccess access(isolate_);
170 if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
171 thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
172 if (!has_pending_interrupts(access)) reset_limits(access);
173 return true;
174 }
175 return false;
176 }
177
// Atomically (under the execution lock) fetches and clears the pending
// interrupt bits, returning them as an int bitfield for HandleInterrupts.
int StackGuard::FetchAndClearInterrupts() {
  ExecutionAccess access(isolate_);

  int result = 0;
  if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
    // The TERMINATE_EXECUTION interrupt is special, since it terminates
    // execution but should leave V8 in a resumable state. If it exists, we only
    // fetch and clear that bit. On resume, V8 can continue processing other
    // interrupts.
    result = TERMINATE_EXECUTION;
    thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
    if (!has_pending_interrupts(access)) reset_limits(access);
  } else {
    // No termination pending: take all flags at once and drop back to the
    // real limits unconditionally.
    result = static_cast<int>(thread_local_.interrupt_flags_);
    thread_local_.interrupt_flags_ = 0;
    reset_limits(access);
  }

  return result;
}
198
// Serializes this thread's stack-guard state into the buffer at |to| (raw
// byte copy of ThreadLocal) and resets the live state to defaults. Returns
// the buffer position past the written bytes. Counterpart of
// RestoreStackGuard.
char* StackGuard::ArchiveStackGuard(char* to) {
  ExecutionAccess access(isolate_);
  MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
  thread_local_ = {};
  return to + sizeof(ThreadLocal);
}
205
// Restores stack-guard state previously written by ArchiveStackGuard from
// the buffer at |from|. Returns the buffer position past the consumed bytes.
char* StackGuard::RestoreStackGuard(char* from) {
  ExecutionAccess access(isolate_);
  MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
  return from + sizeof(ThreadLocal);
}
211
// Persists the real C++ stack limit into the per-thread data so that a later
// InitThread on the same thread can reinstall it via SetStackLimit.
void StackGuard::FreeThreadResources() {
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_stack_limit(thread_local_.real_climit_);
}
217
Initialize(Isolate * isolate,const ExecutionAccess & lock)218 void StackGuard::ThreadLocal::Initialize(Isolate* isolate,
219 const ExecutionAccess& lock) {
220 const uintptr_t kLimitSize = FLAG_stack_size * KB;
221 DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
222 uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
223 real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
224 set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
225 real_climit_ = limit;
226 set_climit(limit);
227 interrupt_scopes_ = nullptr;
228 interrupt_flags_ = 0;
229 }
230
// (Re-)initializes stack-guard state for the current thread. The
// ExecutionAccess argument is evidence that the caller holds the lock.
void StackGuard::InitThread(const ExecutionAccess& lock) {
  thread_local_.Initialize(isolate_, lock);
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  uintptr_t stored_limit = per_thread->stack_limit();
  // A non-zero stored limit was saved earlier by FreeThreadResources;
  // reinstall it so this thread keeps its previously configured limit.
  if (stored_limit != 0) {
    SetStackLimit(stored_limit);
  }
}
241
242 // --- C a l l s t o n a t i v e s ---
243
244 namespace {
245
// Returns whether any bit of |mask| is set in |*bitfield| and clears those
// bits as a side effect.
bool TestAndClear(int* bitfield, int mask) {
  const bool was_set = (*bitfield & mask) != 0;
  *bitfield &= ~mask;
  return was_set;
}
251
// Debug helper: asserts on scope exit that the watched int is zero (i.e.
// every fetched interrupt bit was handled). Compiles to a no-op in release
// builds.
class V8_NODISCARD ShouldBeZeroOnReturnScope final {
 public:
#ifndef DEBUG
  explicit ShouldBeZeroOnReturnScope(int*) {}
#else   // DEBUG
  explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
  ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }

 private:
  int* v_;  // Points at the caller's interrupt bitfield; checked at dtor.
#endif  // DEBUG
};
264
265 } // namespace
266
// Central interrupt dispatcher, reached when a stack check fails against the
// special kInterruptLimit. Fetches all pending interrupt bits and services
// each one; returns the termination sentinel for TERMINATE_EXECUTION,
// otherwise undefined.
Object StackGuard::HandleInterrupts() {
  TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");

#if DEBUG
  isolate_->heap()->VerifyNewSpaceTop();
#endif

  if (FLAG_verify_predictable) {
    // Advance synthetic time by making a time request.
    isolate_->heap()->MonotonicallyIncreasingTimeInMs();
  }

  // Fetch and clear interrupt bits in one go. See comments inside the method
  // for special handling of TERMINATE_EXECUTION.
  int interrupt_flags = FetchAndClearInterrupts();

  // All interrupts should be fully processed when returning from this method.
  ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);

  // Termination wins over everything else: FetchAndClearInterrupts returns
  // only this bit when it is set, so no other handler runs below.
  if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
    TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
    return isolate_->TerminateExecution();
  }

  if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
    isolate_->heap()->HandleGCRequest();
  }

#if V8_ENABLE_WEBASSEMBLY
  if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
    BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
  }

  if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
    TRACE_EVENT0("v8.wasm", "V8.LogCode");
    wasm::GetWasmEngine()->LogOutstandingCodesForIsolate(isolate_);
  }

  if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
    wasm::GetWasmEngine()->ReportLiveCodeFromStackForGC(isolate_);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "V8.GCDeoptMarkedAllocationSites");
    isolate_->heap()->DeoptMarkedAllocationSites();
  }

  if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.InstallOptimizedFunctions");
    DCHECK(isolate_->concurrent_recompilation_enabled());
    isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
  }

  if (TestAndClear(&interrupt_flags, INSTALL_BASELINE_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.FinalizeBaselineConcurrentCompilation");
    isolate_->baseline_batch_compiler()->InstallBatch();
  }

#ifdef V8_ENABLE_MAGLEV
  if (TestAndClear(&interrupt_flags, INSTALL_MAGLEV_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.FinalizeMaglevConcurrentCompilation");
    isolate_->maglev_concurrent_dispatcher()->FinalizeFinishedJobs();
  }
#endif  // V8_ENABLE_MAGLEV

  if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
    TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
    // Callbacks must be invoked outside of ExecutionAccess lock.
    isolate_->InvokeApiInterruptCallbacks();
  }

  isolate_->counters()->stack_interrupts()->Increment();

  return ReadOnlyRoots(isolate_).undefined_value();
}
350
351 } // namespace internal
352 } // namespace v8
353