// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "api.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
#include "v8threads.h"
#include "regexp-stack.h"

namespace v8 {


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;


// Constructor for the Locker object. Once the Locker is constructed the
// current thread will be guaranteed to have the lock for a given isolate.
Locker::Locker(v8::Isolate* isolate)
    : has_lock_(false),
      top_level_(true),
      isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
    isolate_->thread_manager()->Lock();
    has_lock_ = true;

    // Make sure that V8 is initialized. Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!isolate_->IsInitialized()) {
      isolate_->Enter();
      V8::Initialize();
      isolate_->Exit();
    }

    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (isolate_->thread_manager()->RestoreThread()) {
      top_level_ = false;
    } else {
      internal::ExecutionAccess access(isolate_);
      isolate_->stack_guard()->ClearThread(access);
      isolate_->stack_guard()->InitThread(access);
    }
    if (isolate_->IsDefaultIsolate()) {
      // This only enters if not yet entered.
      internal::Isolate::EnterDefaultIsolate();
    }
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}

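// A minimal usage sketch (not part of this file's logic): an embedder thread
// takes the lock with a stack-allocated Locker before touching the API, e.g.
//
//   v8::Locker locker(isolate);   // blocks until this thread holds the lock
//   v8::HandleScope handle_scope;
//   // ... run JavaScript on this thread ...
//                                 // ~Locker() releases the lock again
//
// The isolate pointer here is illustrative; as in the constructor above,
// passing NULL falls back to the default isolate.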

bool Locker::IsLocked(v8::Isolate* isolate) {
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  if (internal_isolate == NULL) {
    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
  }
  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}


bool Locker::IsActive() {
  return active_;
}


Locker::~Locker() {
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (has_lock_) {
    if (isolate_->IsDefaultIsolate()) {
      isolate_->Exit();
    }
    if (top_level_) {
      isolate_->thread_manager()->FreeThreadResources();
    } else {
      isolate_->thread_manager()->ArchiveThread();
    }
    isolate_->thread_manager()->Unlock();
  }
}


Unlocker::Unlocker(v8::Isolate* isolate)
    : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Exit();
  }
  isolate_->thread_manager()->ArchiveThread();
  isolate_->thread_manager()->Unlock();
}


Unlocker::~Unlocker() {
  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
  isolate_->thread_manager()->Lock();
  isolate_->thread_manager()->RestoreThread();
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Enter();
  }
}

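// A hedged sketch of the intended nesting (the embedder function name is
// hypothetical): a thread that holds the lock can let other threads use V8
// while it performs blocking work outside the VM.
//
//   v8::Locker locker(isolate);
//   // ... run JavaScript ...
//   {
//     v8::Unlocker unlocker(isolate);  // archives state, releases the lock
//     DoBlockingWorkOutsideV8();       // hypothetical embedder call
//   }                                  // ~Unlocker() re-locks and restores
//   // ... continue using V8 on this thread ...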

void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}

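// Rough usage sketch (the 50 ms interval is an arbitrary example value): with
// preemption enabled, the ContextSwitcher thread below periodically asks the
// thread currently inside V8 to yield the lock.
//
//   v8::Locker locker(isolate);
//   v8::Locker::StartPreemption(50);   // preempt roughly every 50 ms
//   // ... several cooperating threads each take the Locker in turn ...
//   v8::Locker::StopPreemption();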

namespace internal {


bool ThreadManager::RestoreThread() {
  ASSERT(IsLockedByCurrentThread());
  // First check whether the current thread has been 'lazily archived', i.e.
  // not archived at all. If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
    lazily_archived_thread_ = ThreadId::Invalid();
    Isolate::PerIsolateThreadData* per_thread =
        isolate_->FindPerThreadDataForThisThread();
    ASSERT(per_thread != NULL);
    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    per_thread->set_thread_state(NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access(isolate_);

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindPerThreadDataForThisThread();
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
    // This is a new thread.
    isolate_->stack_guard()->InitThread(access);
    return false;
  }
  ThreadState* state = per_thread->thread_state();
  char* from = state->data();
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
  from = isolate_->RestoreThread(from);
  from = Relocatable::RestoreState(isolate_, from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = isolate_->debug()->RestoreDebug(from);
#endif
  from = isolate_->stack_guard()->RestoreStackGuard(from);
  from = isolate_->regexp_stack()->RestoreStack(from);
  from = isolate_->bootstrapper()->RestoreState(from);
  per_thread->set_thread_state(NULL);
  if (state->terminate_on_restore()) {
    isolate_->stack_guard()->TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  state->set_id(ThreadId::Invalid());
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}


void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_ = ThreadId::Current();
  ASSERT(IsLockedByCurrentThread());
}


void ThreadManager::Unlock() {
  mutex_owner_ = ThreadId::Invalid();
  mutex_->Unlock();
}


static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
         Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
         Debug::ArchiveSpacePerThread() +
#endif
         StackGuard::ArchiveSpacePerThread() +
         RegExpStack::ArchiveSpacePerThread() +
         Bootstrapper::ArchiveSpacePerThread() +
         Relocatable::ArchiveSpacePerThread();
}

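// Layout of the per-thread archive buffer, in the order the components are
// written by EagerlyArchiveThread() and read back by RestoreThread():
// HandleScopeImplementer, Isolate, Relocatable, Debug (when
// ENABLE_DEBUGGER_SUPPORT is set), StackGuard, RegExpStack, Bootstrapper.
// ArchiveSpacePerThread() above only sums the sizes, so the order of its
// summands does not matter, but the archive and restore sequences must agree
// with each other and with the offset skipped in IterateArchivedThreads().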

ThreadState::ThreadState(ThreadManager* thread_manager)
    : id_(ThreadId::Invalid()),
      terminate_on_restore_(false),
      next_(this),
      previous_(this),
      thread_manager_(thread_manager) {
}


void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}


void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? thread_manager_->free_anchor_
                        : thread_manager_->in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}


ThreadState* ThreadManager::GetFreeThreadState() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState(this);
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadManager::FirstThreadStateInUse() {
  return in_use_anchor_->Next();
}


ThreadState* ThreadState::Next() {
  if (next_ == thread_manager_->in_use_anchor_) return NULL;
  return next_;
}


// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
    : mutex_(OS::CreateMutex()),
      mutex_owner_(ThreadId::Invalid()),
      lazily_archived_thread_(ThreadId::Invalid()),
      lazily_archived_thread_state_(NULL),
      free_anchor_(NULL),
      in_use_anchor_(NULL) {
  free_anchor_ = new ThreadState(this);
  in_use_anchor_ = new ThreadState(this);
}

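// Note on the list structure used above: free_anchor_ and in_use_anchor_ are
// sentinel ThreadState nodes for two circular doubly-linked lists. A freshly
// constructed ThreadState links to itself (next_ and previous_ both point at
// this), so an empty list is just an anchor whose next_ is the anchor itself;
// Unlink() and LinkInto() therefore never need to special-case empty lists.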

ThreadManager::~ThreadManager() {
  delete mutex_;
  delete free_anchor_;
  delete in_use_anchor_;
}


void ThreadManager::ArchiveThread() {
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
  ASSERT(!IsArchived());
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = GetFreeThreadState();
  state->Unlink();
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_thread_state(state);
  lazily_archived_thread_ = ThreadId::Current();
  lazily_archived_thread_state_ = state;
  ASSERT(state->id().Equals(ThreadId::Invalid()));
  state->set_id(CurrentId());
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
}

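// ArchiveThread() above only records which thread is giving up the lock; the
// actual copy of the VM state into the ThreadState buffer is deferred to
// EagerlyArchiveThread() below. If the same thread reacquires the lock before
// any other thread needs its slot, RestoreThread() notices the lazily
// archived marker and skips both the copy out and the copy back entirely.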

void ThreadManager::EagerlyArchiveThread() {
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle them
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
  to = isolate_->ArchiveThread(to);
  to = Relocatable::ArchiveState(isolate_, to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = isolate_->debug()->ArchiveDebug(to);
#endif
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
  to = isolate_->regexp_stack()->ArchiveStack(to);
  to = isolate_->bootstrapper()->ArchiveState(to);
  lazily_archived_thread_ = ThreadId::Invalid();
  lazily_archived_thread_state_ = NULL;
}


void ThreadManager::FreeThreadResources() {
  isolate_->handle_scope_implementer()->FreeThreadResources();
  isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->FreeThreadResources();
#endif
  isolate_->stack_guard()->FreeThreadResources();
  isolate_->regexp_stack()->FreeThreadResources();
  isolate_->bootstrapper()->FreeThreadResources();
}


bool ThreadManager::IsArchived() {
  Isolate::PerIsolateThreadData* data =
      isolate_->FindPerThreadDataForThisThread();
  return data != NULL && data->thread_state() != NULL;
}


void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization.
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = isolate_->Iterate(v, data);
    data = Relocatable::Iterate(v, data);
  }
}


void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    isolate_->IterateThread(v, data);
  }
}


ThreadId ThreadManager::CurrentId() {
  return ThreadId::Current();
}


void ThreadManager::TerminateExecution(ThreadId thread_id) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    if (thread_id.Equals(state->id())) {
      state->set_terminate_on_restore(true);
    }
  }
}


ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
    : Thread("v8:CtxtSwitcher"),
      keep_going_(true),
      sleep_ms_(every_n_ms),
      isolate_(isolate) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it now.
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
    isolate->context_switcher()->Start();
  } else {
    // The ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
  }
}


// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst themselves from this point on.
void ContextSwitcher::StopPreemption() {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    isolate->context_switcher()->keep_going_ = false;
    // Wait for the ContextSwitcher thread to exit.
    isolate->context_switcher()->Join();
    // The thread has exited, now we can delete it.
    delete(isolate->context_switcher());
    isolate->set_context_switcher(NULL);
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    isolate()->stack_guard()->Preempt();
  }
}


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this. But could be in the
  // future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8