1 // Copyright 2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "api.h"
31 #include "bootstrapper.h"
32 #include "debug.h"
33 #include "execution.h"
34 #include "v8threads.h"
35 #include "regexp-stack.h"
36
37 namespace v8 {
38
// Per-thread slot holding this thread's archived ThreadState* (NULL when
// the thread has no archived V8 state).
static internal::Thread::LocalStorageKey thread_state_key =
    internal::Thread::CreateThreadLocalKey();
// Per-thread slot holding this thread's integer id (0 means "no id
// assigned yet"; see the comment near ThreadManager::last_id_).
static internal::Thread::LocalStorageKey thread_id_key =
    internal::Thread::CreateThreadLocalKey();


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;
48
49
// Constructor for the Locker object. Once the Locker is constructed the
// current thread will be guaranteed to have the big V8 lock.
Locker::Locker() : has_lock_(false), top_level_(true) {
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary (it may already be held by an enclosing
  // Locker on this same thread).
  if (!internal::ThreadManager::IsLockedByCurrentThread()) {
    internal::ThreadManager::Lock();
    has_lock_ = true;
    // Make sure that V8 is initialized.  Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!internal::V8::IsRunning()) {
      V8::Initialize();
    }
    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (internal::ThreadManager::RestoreThread()) {
      // There was archived state, so this thread has run V8 before and is
      // not entering it at top level.
      top_level_ = false;
    } else {
      // First time this thread enters V8: set up a fresh stack guard.
      internal::ExecutionAccess access;
      internal::StackGuard::ClearThread(access);
      internal::StackGuard::InitThread(access);
    }
  }
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());

  // Make sure this thread is assigned a thread id.
  internal::ThreadManager::AssignId();
}
80
81
IsLocked()82 bool Locker::IsLocked() {
83 return internal::ThreadManager::IsLockedByCurrentThread();
84 }
85
86
~Locker()87 Locker::~Locker() {
88 ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
89 if (has_lock_) {
90 if (top_level_) {
91 internal::ThreadManager::FreeThreadResources();
92 } else {
93 internal::ThreadManager::ArchiveThread();
94 }
95 internal::ThreadManager::Unlock();
96 }
97 }
98
99
// Constructing an Unlocker archives the calling thread's V8 state and
// temporarily gives up the big V8 lock so other threads can enter V8.
Unlocker::Unlocker() {
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::ArchiveThread();
  internal::ThreadManager::Unlock();
}
105
106
// Destroying the Unlocker re-acquires the big V8 lock and restores the
// thread state that the constructor archived.
Unlocker::~Unlocker() {
  ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::Lock();
  internal::ThreadManager::RestoreThread();
}
112
113
// Starts preempting V8 threads every |every_n_ms| milliseconds by
// delegating to the internal ContextSwitcher thread.
void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}
117
118
// Stops preemption of V8 threads; delegates to the ContextSwitcher.
void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}
122
123
124 namespace internal {
125
126
// Restores the calling thread's previously archived V8 state, if any.
// Returns true if there was state to restore (i.e. the thread has run V8
// before), false if this is a brand new thread.
bool ThreadManager::RestoreThread() {
  // First check whether the current thread has been 'lazily archived', ie
  // not archived at all.  If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.IsSelf()) {
    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
    ASSERT(Thread::GetThreadLocal(thread_state_key) ==
           lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(kInvalidId);
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    Thread::SetThreadLocal(thread_state_key, NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access;

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  ThreadState* state =
      reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
  if (state == NULL) {
    // This is a new thread.
    StackGuard::InitThread(access);
    return false;
  }
  // Read the components back in the same order they were written by
  // EagerlyArchiveThread().
  char* from = state->data();
  from = HandleScopeImplementer::RestoreThread(from);
  from = Top::RestoreThread(from);
  from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = Debug::RestoreDebug(from);
#endif
  from = StackGuard::RestoreStackGuard(from);
  from = RegExpStack::RestoreStack(from);
  from = Bootstrapper::RestoreState(from);
  Thread::SetThreadLocal(thread_state_key, NULL);
  // If a termination was requested for this thread while it was archived
  // (see TerminateExecution(int)), schedule it now.
  if (state->terminate_on_restore()) {
    StackGuard::TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  // The state buffer is no longer in use; recycle it.
  state->set_id(kInvalidId);
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}
178
179
// Acquires the big V8 mutex and records the calling thread as its owner.
void ThreadManager::Lock() {
  mutex_->Lock();
  // Record ownership only after the mutex is actually held.
  mutex_owner_.Initialize(ThreadHandle::SELF);
  ASSERT(IsLockedByCurrentThread());
}
185
186
// Clears the ownership record, then releases the big V8 mutex.
void ThreadManager::Unlock() {
  // Clear ownership before unlocking so the owner field never names a
  // thread that no longer holds the mutex.
  mutex_owner_.Initialize(ThreadHandle::INVALID);
  mutex_->Unlock();
}
191
192
ArchiveSpacePerThread()193 static int ArchiveSpacePerThread() {
194 return HandleScopeImplementer::ArchiveSpacePerThread() +
195 Top::ArchiveSpacePerThread() +
196 #ifdef ENABLE_DEBUGGER_SUPPORT
197 Debug::ArchiveSpacePerThread() +
198 #endif
199 StackGuard::ArchiveSpacePerThread() +
200 RegExpStack::ArchiveSpacePerThread() +
201 Bootstrapper::ArchiveSpacePerThread() +
202 Relocatable::ArchiveSpacePerThread();
203 }
204
205
// Sentinel nodes of the two circular doubly-linked lists of ThreadState
// objects (free and in-use). An empty list is an anchor linked to itself.
ThreadState* ThreadState::free_anchor_ = new ThreadState();
ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
208
209
// A freshly created ThreadState has no owning thread (invalid id) and
// forms a single-element circular list pointing at itself.
ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
                             terminate_on_restore_(false),
                             next_(this), previous_(this) {
}
214
215
// Allocates the buffer that holds this thread's archived V8 state.
void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}
219
220
// Removes this ThreadState from whichever circular list it is linked into.
void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}
225
226
LinkInto(List list)227 void ThreadState::LinkInto(List list) {
228 ThreadState* flying_anchor =
229 list == FREE_LIST ? free_anchor_ : in_use_anchor_;
230 next_ = flying_anchor->next_;
231 previous_ = flying_anchor;
232 flying_anchor->next_ = this;
233 next_->previous_ = this;
234 }
235
236
GetFree()237 ThreadState* ThreadState::GetFree() {
238 ThreadState* gotten = free_anchor_->next_;
239 if (gotten == free_anchor_) {
240 ThreadState* new_thread_state = new ThreadState();
241 new_thread_state->AllocateSpace();
242 return new_thread_state;
243 }
244 return gotten;
245 }
246
247
// Gets the first in the list of archived threads (NULL if the in-use
// list is empty; see Next()).
ThreadState* ThreadState::FirstInUse() {
  return in_use_anchor_->Next();
}
252
253
Next()254 ThreadState* ThreadState::Next() {
255 if (next_ == in_use_anchor_) return NULL;
256 return next_;
257 }
258
259
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
int ThreadManager::last_id_ = 0;
// The big V8 lock and the thread currently holding it (INVALID if none).
Mutex* ThreadManager::mutex_ = OS::CreateMutex();
ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
// The thread (and its prepared state buffer) that is currently 'lazily
// archived'; see ArchiveThread() and RestoreThread().
ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
268
269
// Prepares this thread's V8 state for archiving but defers the actual
// copying ('lazy archiving'): if the same thread re-enters V8 before any
// other thread does, RestoreThread() simply discards the preparation.
void ThreadManager::ArchiveThread() {
  ASSERT(!lazily_archived_thread_.IsValid());
  ASSERT(!IsArchived());
  // Grab a state buffer from the free list and take it out of any list.
  ThreadState* state = ThreadState::GetFree();
  state->Unlink();
  // Remember the buffer in TLS and mark this thread as lazily archived.
  Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
  lazily_archived_thread_state_ = state;
  ASSERT(state->id() == kInvalidId);
  state->set_id(CurrentId());
  ASSERT(state->id() != kInvalidId);
}
282
283
// Actually copies the lazily archived thread's V8 state into its
// ThreadState buffer and links it into the in-use list. The write order
// below must match the read order in RestoreThread() and the sizes summed
// in ArchiveSpacePerThread().
void ThreadManager::EagerlyArchiveThread() {
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle them
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = HandleScopeImplementer::ArchiveThread(to);
  to = Top::ArchiveThread(to);
  to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = Debug::ArchiveDebug(to);
#endif
  to = StackGuard::ArchiveStackGuard(to);
  to = RegExpStack::ArchiveStack(to);
  to = Bootstrapper::ArchiveState(to);
  // The thread is no longer lazily archived.
  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
  lazily_archived_thread_state_ = NULL;
}
302
303
// Releases all per-thread V8 resources of the calling thread. Used when
// no state needs to survive for a later re-entry (see ~Locker()).
void ThreadManager::FreeThreadResources() {
  HandleScopeImplementer::FreeThreadResources();
  Top::FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::FreeThreadResources();
#endif
  StackGuard::FreeThreadResources();
  RegExpStack::FreeThreadResources();
  Bootstrapper::FreeThreadResources();
}
314
315
// Returns whether the calling thread currently has archived V8 state.
bool ThreadManager::IsArchived() {
  return Thread::HasThreadLocal(thread_state_key);
}
319
320
// Visits the GC roots stored in every archived thread's state buffer.
// Only the components archived first (handle scopes, Top, Relocatable)
// contain roots; see the comment in EagerlyArchiveThread().
void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = Top::Iterate(v, data);
    data = Relocatable::Iterate(v, data);
  }
}
332
333
// Called before a mark-compact GC: lets Top fix up each archived thread's
// state. The archived handle scope data is skipped over; only the Top
// portion of the buffer is passed along.
void ThreadManager::MarkCompactPrologue(bool is_compacting) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    // Skip past the handle scope component to reach Top's archived data.
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    Top::MarkCompactPrologue(is_compacting, data);
  }
}
343
344
MarkCompactEpilogue(bool is_compacting)345 void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
346 for (ThreadState* state = ThreadState::FirstInUse();
347 state != NULL;
348 state = state->Next()) {
349 char* data = state->data();
350 data += HandleScopeImplementer::ArchiveSpacePerThread();
351 Top::MarkCompactEpilogue(is_compacting, data);
352 }
353 }
354
355
// Returns the calling thread's id from TLS (0 when no id was assigned;
// see the comment near last_id_).
int ThreadManager::CurrentId() {
  return Thread::GetThreadLocalInt(thread_id_key);
}
359
360
// Assigns the calling thread a fresh, positive thread id if it does not
// already have one, publishing it both in TLS and in Top.
void ThreadManager::AssignId() {
  if (!HasId()) {
    ASSERT(Locker::IsLocked());
    int thread_id = ++last_id_;
    ASSERT(thread_id > 0);  // see the comment near last_id_ definition.
    Thread::SetThreadLocalInt(thread_id_key, thread_id);
    Top::set_thread_id(thread_id);
  }
}
370
371
// Returns whether the calling thread has been assigned a thread id.
bool ThreadManager::HasId() {
  return Thread::HasThreadLocal(thread_id_key);
}
375
376
TerminateExecution(int thread_id)377 void ThreadManager::TerminateExecution(int thread_id) {
378 for (ThreadState* state = ThreadState::FirstInUse();
379 state != NULL;
380 state = state->Next()) {
381 if (thread_id == state->id()) {
382 state->set_terminate_on_restore(true);
383 }
384 }
385 }
386
387
// This is the ContextSwitcher singleton. There is at most a single thread
// running which delivers preemption events to V8 threads. NULL while no
// preemption is active.
ContextSwitcher* ContextSwitcher::singleton_ = NULL;
391
392
// Creates the ContextSwitcher thread state; the thread's Run() loop keeps
// going until StopPreemption() clears keep_going_.
ContextSwitcher::ContextSwitcher(int every_n_ms)
  : keep_going_(true),
    sleep_ms_(every_n_ms) {
}
397
398
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  ASSERT(Locker::IsLocked());
  if (singleton_ == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it now.
    singleton_ = new ContextSwitcher(every_n_ms);
    singleton_->Start();
  } else {
    // ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    singleton_->sleep_ms_ = every_n_ms;
  }
}
413
414
415 // Disable preemption of V8 threads. If multiple threads want to use V8 they
416 // must cooperatively schedule amongst them from this point on.
StopPreemption()417 void ContextSwitcher::StopPreemption() {
418 ASSERT(Locker::IsLocked());
419 if (singleton_ != NULL) {
420 // The ContextSwitcher thread is running. We need to stop it and release
421 // its resources.
422 singleton_->keep_going_ = false;
423 singleton_->Join(); // Wait for the ContextSwitcher thread to exit.
424 // Thread has exited, now we can delete it.
425 delete(singleton_);
426 singleton_ = NULL;
427 }
428 }
429
430
// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals by raising a preemption request on the
// stack guard.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    StackGuard::Preempt();
  }
}
439
440
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this. But could be in the
  // future, which is why we leave this in.
}
447
448
449 } // namespace internal
450 } // namespace v8
451