1 // Copyright 2019 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/execution/stack-guard.h"
6
7 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
8 #include "src/execution/interrupts-scope.h"
9 #include "src/execution/isolate.h"
10 #include "src/execution/runtime-profiler.h"
11 #include "src/execution/simulator.h"
12 #include "src/logging/counters.h"
13 #include "src/objects/backing-store.h"
14 #include "src/roots/roots-inl.h"
15 #include "src/utils/memcopy.h"
16 #include "src/wasm/wasm-engine.h"
17
18 namespace v8 {
19 namespace internal {
20
set_interrupt_limits(const ExecutionAccess & lock)21 void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
22 DCHECK_NOT_NULL(isolate_);
23 thread_local_.set_jslimit(kInterruptLimit);
24 thread_local_.set_climit(kInterruptLimit);
25 }
26
reset_limits(const ExecutionAccess & lock)27 void StackGuard::reset_limits(const ExecutionAccess& lock) {
28 DCHECK_NOT_NULL(isolate_);
29 thread_local_.set_jslimit(thread_local_.real_jslimit_);
30 thread_local_.set_climit(thread_local_.real_climit_);
31 }
32
// Installs a new C stack limit and the JS limit derived from it.
// If the current limits are special (e.g. due to a pending interrupt) then
// leave them alone: only the real_* values are updated unconditionally, so
// the new limits take effect once the interrupt has been serviced.
void StackGuard::SetStackLimit(uintptr_t limit) {
  ExecutionAccess access(isolate_);
  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
  // NOTE: the comparisons must read the OLD real_* values, which is why the
  // real_* assignments happen last.
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
  }
  if (thread_local_.climit() == thread_local_.real_climit_) {
    thread_local_.set_climit(limit);
  }
  thread_local_.real_climit_ = limit;
  thread_local_.real_jslimit_ = jslimit;
}
47
AdjustStackLimitForSimulator()48 void StackGuard::AdjustStackLimitForSimulator() {
49 ExecutionAccess access(isolate_);
50 uintptr_t climit = thread_local_.real_climit_;
51 // If the current limits are special (e.g. due to a pending interrupt) then
52 // leave them alone.
53 uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
54 if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
55 thread_local_.set_jslimit(jslimit);
56 }
57 }
58
EnableInterrupts()59 void StackGuard::EnableInterrupts() {
60 ExecutionAccess access(isolate_);
61 if (has_pending_interrupts(access)) {
62 set_interrupt_limits(access);
63 }
64 }
65
// Restores the real stack limits, dropping any armed interrupt sentinel.
// The pending interrupt flags themselves are left untouched.
void StackGuard::DisableInterrupts() {
  ExecutionAccess access(isolate_);
  reset_limits(access);
}
70
// Registers |scope| as the innermost InterruptsScope and reconciles the
// pending-interrupt state with the scope's mode.
void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
  ExecutionAccess access(isolate_);
  DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
  if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Intercept already requested interrupts: move the flags matching the
    // scope's mask from the active set into the scope, so they are only
    // delivered once the scope is popped.
    intptr_t intercepted =
        thread_local_.interrupt_flags_ & scope->intercept_mask_;
    scope->intercepted_flags_ = intercepted;
    thread_local_.interrupt_flags_ &= ~intercepted;
  } else {
    DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
    // Restore postponed interrupts: collect every flag matching the mask
    // that any scope on the chain had intercepted and make it active again.
    int restored_flags = 0;
    for (InterruptsScope* current = thread_local_.interrupt_scopes_;
         current != nullptr; current = current->prev_) {
      restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
      current->intercepted_flags_ &= ~scope->intercept_mask_;
    }
    thread_local_.interrupt_flags_ |= restored_flags;

    // Arm the stack limits if restoring made interrupts pending.
    if (has_pending_interrupts(access)) set_interrupt_limits(access);
  }
  // If interception removed the last pending interrupt, relax the limits.
  if (!has_pending_interrupts(access)) reset_limits(access);
  // Add scope to the chain.
  scope->prev_ = thread_local_.interrupt_scopes_;
  thread_local_.interrupt_scopes_ = scope;
}
98
// Unregisters the innermost InterruptsScope and reconciles the
// pending-interrupt state with the scope's mode.
void StackGuard::PopInterruptsScope() {
  ExecutionAccess access(isolate_);
  InterruptsScope* top = thread_local_.interrupt_scopes_;
  DCHECK_NE(top->mode_, InterruptsScope::kNoop);
  if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Make intercepted interrupts active. While this scope was innermost,
    // flags matching its mask must have been intercepted, never active.
    DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
    thread_local_.interrupt_flags_ |= top->intercepted_flags_;
  } else {
    DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
    // Postpone existing interrupts if needed: hand each active flag that the
    // next-outer scope wants to intercept over to that scope.
    if (top->prev_) {
      // Walk every single-bit flag below ALL_INTERRUPTS.
      for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
           interrupt = interrupt << 1) {
        InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
        if ((thread_local_.interrupt_flags_ & flag) &&
            top->prev_->Intercept(flag)) {
          thread_local_.interrupt_flags_ &= ~flag;
        }
      }
    }
  }
  // Arm the stack limits if any interrupt became (or stayed) pending.
  if (has_pending_interrupts(access)) set_interrupt_limits(access);
  // Remove scope from chain.
  thread_local_.interrupt_scopes_ = top->prev_;
}
125
CheckInterrupt(InterruptFlag flag)126 bool StackGuard::CheckInterrupt(InterruptFlag flag) {
127 ExecutionAccess access(isolate_);
128 return (thread_local_.interrupt_flags_ & flag) != 0;
129 }
130
RequestInterrupt(InterruptFlag flag)131 void StackGuard::RequestInterrupt(InterruptFlag flag) {
132 ExecutionAccess access(isolate_);
133 // Check the chain of InterruptsScope for interception.
134 if (thread_local_.interrupt_scopes_ &&
135 thread_local_.interrupt_scopes_->Intercept(flag)) {
136 return;
137 }
138
139 // Not intercepted. Set as active interrupt flag.
140 thread_local_.interrupt_flags_ |= flag;
141 set_interrupt_limits(access);
142
143 // If this isolate is waiting in a futex, notify it to wake up.
144 isolate_->futex_wait_list_node()->NotifyWake();
145 }
146
ClearInterrupt(InterruptFlag flag)147 void StackGuard::ClearInterrupt(InterruptFlag flag) {
148 ExecutionAccess access(isolate_);
149 // Clear the interrupt flag from the chain of InterruptsScope.
150 for (InterruptsScope* current = thread_local_.interrupt_scopes_;
151 current != nullptr; current = current->prev_) {
152 current->intercepted_flags_ &= ~flag;
153 }
154
155 // Clear the interrupt flag from the active interrupt flags.
156 thread_local_.interrupt_flags_ &= ~flag;
157 if (!has_pending_interrupts(access)) reset_limits(access);
158 }
159
// Fetches and clears the active interrupt flags under the ExecutionAccess
// lock, resetting the stack limits accordingly, and returns the fetched
// flags as a bit set.
int StackGuard::FetchAndClearInterrupts() {
  ExecutionAccess access(isolate_);

  int result = 0;
  if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
    // The TERMINATE_EXECUTION interrupt is special, since it terminates
    // execution but should leave V8 in a resumable state. If it exists, we only
    // fetch and clear that bit. On resume, V8 can continue processing other
    // interrupts.
    result = TERMINATE_EXECUTION;
    thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
    // Other interrupts may still be pending; keep the limits armed for them.
    if (!has_pending_interrupts(access)) reset_limits(access);
  } else {
    // Take everything at once and relax the limits unconditionally.
    result = static_cast<int>(thread_local_.interrupt_flags_);
    thread_local_.interrupt_flags_ = 0;
    reset_limits(access);
  }

  return result;
}
180
// Saves this isolate's stack guard state into the archive buffer |to| and
// leaves a default-constructed state behind; returns the first byte past
// the saved data.
char* StackGuard::ArchiveStackGuard(char* to) {
  ExecutionAccess access(isolate_);
  MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
  thread_local_ = {};
  return to + sizeof(ThreadLocal);
}
187
// Restores stack guard state previously saved by ArchiveStackGuard from
// |from|; returns the first byte past the consumed data.
char* StackGuard::RestoreStackGuard(char* from) {
  ExecutionAccess access(isolate_);
  MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
  return from + sizeof(ThreadLocal);
}
193
// Persists the real C stack limit into the per-thread data so that a later
// InitThread on this thread can re-apply it (see InitThread).
void StackGuard::FreeThreadResources() {
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_stack_limit(thread_local_.real_climit_);
}
199
Initialize(Isolate * isolate,const ExecutionAccess & lock)200 void StackGuard::ThreadLocal::Initialize(Isolate* isolate,
201 const ExecutionAccess& lock) {
202 const uintptr_t kLimitSize = FLAG_stack_size * KB;
203 DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
204 uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
205 real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
206 set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
207 real_climit_ = limit;
208 set_climit(limit);
209 interrupt_scopes_ = nullptr;
210 interrupt_flags_ = 0;
211 }
212
// Initializes the stack guard for the current thread. The caller must hold
// the ExecutionAccess lock (witnessed by |lock|).
void StackGuard::InitThread(const ExecutionAccess& lock) {
  thread_local_.Initialize(isolate_, lock);
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  uintptr_t stored_limit = per_thread->stack_limit();
  // Re-apply a limit persisted by a previous FreeThreadResources on this
  // thread; zero means no limit was stored.
  if (stored_limit != 0) {
    SetStackLimit(stored_limit);
  }
}
223
224 // --- C a l l s t o n a t i v e s ---
225
226 namespace {
227
// Returns whether any bit of |mask| is set in |*bitfield|, and clears
// exactly those bits.
bool TestAndClear(int* bitfield, int mask) {
  const bool was_set = (*bitfield & mask) != 0;
  *bitfield &= ~mask;
  return was_set;
}
233
// Debug-only guard: in DEBUG builds its destructor checks that the watched
// int has been consumed down to zero by the time the enclosing scope exits;
// in release builds it compiles away entirely.
class ShouldBeZeroOnReturnScope final {
 public:
#ifndef DEBUG
  explicit ShouldBeZeroOnReturnScope(int*) {}
#else   // DEBUG
  explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
  ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }

 private:
  int* v_;  // Watched value; must be zero at destruction.
#endif  // DEBUG
};
246
247 } // namespace
248
// Services every pending interrupt fetched via FetchAndClearInterrupts.
// Returns the termination exception sentinel when TERMINATE_EXECUTION was
// pending, otherwise undefined.
Object StackGuard::HandleInterrupts() {
  TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");

#if DEBUG
  isolate_->heap()->VerifyNewSpaceTop();
#endif

  if (FLAG_verify_predictable) {
    // Advance synthetic time by making a time request.
    isolate_->heap()->MonotonicallyIncreasingTimeInMs();
  }

  // Fetch and clear interrupt bits in one go. See comments inside the method
  // for special handling of TERMINATE_EXECUTION.
  int interrupt_flags = FetchAndClearInterrupts();

  // All interrupts should be fully processed when returning from this method.
  ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);

  // TERMINATE_EXECUTION ends processing immediately; any other pending
  // interrupts were left set by FetchAndClearInterrupts for the resume.
  if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
    TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
    return isolate_->TerminateExecution();
  }

  if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
    isolate_->heap()->HandleGCRequest();
  }

  if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
    BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
  }

  if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "V8.GCDeoptMarkedAllocationSites");
    isolate_->heap()->DeoptMarkedAllocationSites();
  }

  if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.InstallOptimizedFunctions");
    DCHECK(isolate_->concurrent_recompilation_enabled());
    isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
  }

  if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
    TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
    // Callbacks must be invoked outside of ExecutionAccess lock.
    isolate_->InvokeApiInterruptCallbacks();
  }

  if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
    TRACE_EVENT0("v8.wasm", "V8.LogCode");
    isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
  }

  if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
    isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
  }

  isolate_->counters()->stack_interrupts()->Increment();

  return ReadOnlyRoots(isolate_).undefined_value();
}
316
317 } // namespace internal
318 } // namespace v8
319