// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "api.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
#include "v8threads.h"
#include "regexp-stack.h"

namespace v8 {

static internal::Thread::LocalStorageKey thread_state_key =
    internal::Thread::CreateThreadLocalKey();
static internal::Thread::LocalStorageKey thread_id_key =
    internal::Thread::CreateThreadLocalKey();


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;


// Constructor for the Locker object.  Once the Locker is constructed the
// current thread will be guaranteed to have the big V8 lock.
Locker::Locker() : has_lock_(false), top_level_(true) {
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!internal::ThreadManager::IsLockedByCurrentThread()) {
    internal::ThreadManager::Lock();
    has_lock_ = true;
    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (internal::ThreadManager::RestoreThread()) {
      top_level_ = false;
    }
  }
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());

  // Make sure this thread is assigned a thread id.
  internal::ThreadManager::AssignId();
}


bool Locker::IsLocked() {
  return internal::ThreadManager::IsLockedByCurrentThread();
}


Locker::~Locker() {
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
  if (has_lock_) {
    if (!top_level_) {
      internal::ThreadManager::ArchiveThread();
    }
    internal::ThreadManager::Unlock();
  }
}


Unlocker::Unlocker() {
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::ArchiveThread();
  internal::ThreadManager::Unlock();
}


Unlocker::~Unlocker() {
  ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::Lock();
  internal::ThreadManager::RestoreThread();
}
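

// Illustrative usage sketch (not authoritative): a typical embedder pattern
// for the Locker/Unlocker pair.  The function name and the work it performs
// are hypothetical; only v8::Locker and v8::Unlocker come from this API.
//
//   void EmbedderThreadMain() {
//     v8::Locker locker;        // Blocks until this thread holds the big V8 lock.
//     // ... run JavaScript through the V8 API ...
//     {
//       v8::Unlocker unlocker;  // Archives this thread's V8 state and releases
//                               // the lock so other threads can enter V8.
//       // ... do blocking work that does not touch V8 ...
//     }                         // ~Unlocker re-acquires the lock and restores state.
//   }                           // ~Locker releases the lock it acquired.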


void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}
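

// Illustrative usage sketch (not authoritative): turning preemption on and
// off from embedder code.  The 100 ms interval is an arbitrary example value.
//
//   v8::Locker locker;                 // Preemption may only be controlled
//   v8::Locker::StartPreemption(100);  // while the V8 lock is held.
//   // ... multiple embedder threads now share V8 in ~100 ms slices ...
//   v8::Locker::StopPreemption();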


namespace internal {


bool ThreadManager::RestoreThread() {
  // First check whether the current thread has been 'lazily archived', i.e.
  // not archived at all.  If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.IsSelf()) {
    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
    ASSERT(Thread::GetThreadLocal(thread_state_key) ==
           lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(kInvalidId);
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    Thread::SetThreadLocal(thread_state_key, NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access;

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  ThreadState* state =
      reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
  if (state == NULL) {
    return false;
  }
  char* from = state->data();
  from = HandleScopeImplementer::RestoreThread(from);
  from = Top::RestoreThread(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = Debug::RestoreDebug(from);
#endif
  from = StackGuard::RestoreStackGuard(from);
  from = RegExpStack::RestoreStack(from);
  from = Bootstrapper::RestoreState(from);
  Thread::SetThreadLocal(thread_state_key, NULL);
  if (state->terminate_on_restore()) {
    StackGuard::TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  state->set_id(kInvalidId);
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}


void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_.Initialize(ThreadHandle::SELF);
  ASSERT(IsLockedByCurrentThread());
}


void ThreadManager::Unlock() {
  mutex_owner_.Initialize(ThreadHandle::INVALID);
  mutex_->Unlock();
}

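// Total size of a per-thread archive buffer.  The buffer is one flat block:
// a concatenation of per-component slices, laid out in the same order in
// which EagerlyArchiveThread() writes them and RestoreThread() reads them.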
static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
                            Top::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
                          Debug::ArchiveSpacePerThread() +
#endif
                     StackGuard::ArchiveSpacePerThread() +
                    RegExpStack::ArchiveSpacePerThread() +
                   Bootstrapper::ArchiveSpacePerThread();
}

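// ThreadState objects live on one of two circular, doubly-linked lists
// anchored by the sentinel nodes below: a free list of reusable states and
// an in-use list of states that currently hold archived thread data.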
ThreadState* ThreadState::free_anchor_ = new ThreadState();
ThreadState* ThreadState::in_use_anchor_ = new ThreadState();


ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
                             terminate_on_restore_(false),
                             next_(this), previous_(this) {
}


void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}


void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? free_anchor_ : in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}


ThreadState* ThreadState::GetFree() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState();
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadState::FirstInUse() {
  return in_use_anchor_->Next();
}


ThreadState* ThreadState::Next() {
  if (next_ == in_use_anchor_) return NULL;
  return next_;
}


// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
int ThreadManager::last_id_ = 0;
Mutex* ThreadManager::mutex_ = OS::CreateMutex();
ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;

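// Archiving is lazy: ArchiveThread() only reserves a ThreadState and records
// the current thread as "lazily archived".  The actual copy happens in
// EagerlyArchiveThread() when a different thread needs to run, or is skipped
// entirely by RestoreThread()'s fast path if the same thread re-enters V8.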
void ThreadManager::ArchiveThread() {
  ASSERT(!lazily_archived_thread_.IsValid());
  ASSERT(!IsArchived());
  ThreadState* state = ThreadState::GetFree();
  state->Unlink();
  Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
  lazily_archived_thread_state_ = state;
  ASSERT(state->id() == kInvalidId);
  state->set_id(CurrentId());
  ASSERT(state->id() != kInvalidId);
}


void ThreadManager::EagerlyArchiveThread() {
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle them
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = HandleScopeImplementer::ArchiveThread(to);
  to = Top::ArchiveThread(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = Debug::ArchiveDebug(to);
#endif
  to = StackGuard::ArchiveStackGuard(to);
  to = RegExpStack::ArchiveStack(to);
  to = Bootstrapper::ArchiveState(to);
  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
  lazily_archived_thread_state_ = NULL;
}


bool ThreadManager::IsArchived() {
  return Thread::HasThreadLocal(thread_state_key);
}


void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = Top::Iterate(v, data);
  }
}


void ThreadManager::MarkCompactPrologue(bool is_compacting) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    Top::MarkCompactPrologue(is_compacting, data);
  }
}


void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    Top::MarkCompactEpilogue(is_compacting, data);
  }
}


int ThreadManager::CurrentId() {
  return Thread::GetThreadLocalInt(thread_id_key);
}


void ThreadManager::AssignId() {
  if (!HasId()) {
    ASSERT(Locker::IsLocked());
    int thread_id = ++last_id_;
    ASSERT(thread_id > 0);  // see the comment near last_id_ definition.
    Thread::SetThreadLocalInt(thread_id_key, thread_id);
    Top::set_thread_id(thread_id);
  }
}


bool ThreadManager::HasId() {
  return Thread::HasThreadLocal(thread_id_key);
}


void ThreadManager::TerminateExecution(int thread_id) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    if (thread_id == state->id()) {
      state->set_terminate_on_restore(true);
    }
  }
}


// This is the ContextSwitcher singleton. There is at most a single thread
// running which delivers preemption events to V8 threads.
ContextSwitcher* ContextSwitcher::singleton_ = NULL;


ContextSwitcher::ContextSwitcher(int every_n_ms)
  : keep_going_(true),
    sleep_ms_(every_n_ms) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  ASSERT(Locker::IsLocked());
  if (singleton_ == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it now.
    singleton_ = new ContextSwitcher(every_n_ms);
    singleton_->Start();
  } else {
    // ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    singleton_->sleep_ms_ = every_n_ms;
  }
}

// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst themselves from this point on.
void ContextSwitcher::StopPreemption() {
  ASSERT(Locker::IsLocked());
  if (singleton_ != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    singleton_->keep_going_ = false;
    singleton_->Join();  // Wait for the ContextSwitcher thread to exit.
    // Thread has exited, now we can delete it.
    delete singleton_;
    singleton_ = NULL;
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    StackGuard::Preempt();
  }
}


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this, but there could be
  // in the future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8