// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "api.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
#include "v8threads.h"
#include "regexp-stack.h"

namespace v8 {


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;


// Constructor for the Locker object. Once the Locker is constructed the
// current thread will be guaranteed to have the big V8 lock.
Locker::Locker() : has_lock_(false), top_level_(true) {
  // TODO(isolates): When Locker has an Isolate parameter and it is provided,
  // grab that one instead of using the current one.
  // We use the default isolate for a Locker constructed without a parameter.
  // In scenarios that mandate using Lockers, a thread should not enter an
  // isolate before acquiring a lock; taking the lock is therefore the first
  // thing a thread does when multiple threads share an isolate. Hence, we
  // need access to the 'locking isolate' before we can actually enter the
  // default isolate.
  internal::Isolate* isolate = internal::Isolate::GetDefaultIsolateForLocking();
  ASSERT(isolate != NULL);

  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!isolate->thread_manager()->IsLockedByCurrentThread()) {
    isolate->thread_manager()->Lock();
    has_lock_ = true;

    if (isolate->IsDefaultIsolate()) {
      // This only enters if not yet entered.
      internal::Isolate::EnterDefaultIsolate();
    }

    ASSERT(internal::Thread::HasThreadLocal(
        internal::Isolate::thread_id_key()));

    // Make sure that V8 is initialized. Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!isolate->IsInitialized()) {
      V8::Initialize();
    }
    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (isolate->thread_manager()->RestoreThread()) {
      top_level_ = false;
    } else {
      internal::ExecutionAccess access(isolate);
      isolate->stack_guard()->ClearThread(access);
      isolate->stack_guard()->InitThread(access);
    }
  }
  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
}
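

// Illustrative usage sketch (comment only, not part of this file's logic).
// It assumes the public v8::Locker / v8::Unlocker API declared in v8.h and
// shows the pattern the code in this file supports:
//
//   {
//     v8::Locker locker;          // Blocks until this thread holds the V8 lock.
//     v8::HandleScope scope;
//     // ... call into V8 ...
//     {
//       v8::Unlocker unlocker;    // Archives thread state and yields the lock.
//       // ... long-running work that does not touch V8 ...
//     }                           // Lock re-acquired and thread state restored.
//   }                             // ~Locker() releases the lock.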


bool Locker::IsLocked() {
  return internal::Isolate::Current()->thread_manager()->
      IsLockedByCurrentThread();
}


Locker::~Locker() {
  // TODO(isolate): this should use a field storing the isolate it
  // locked instead.
  internal::Isolate* isolate = internal::Isolate::Current();
  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
  if (has_lock_) {
    if (top_level_) {
      isolate->thread_manager()->FreeThreadResources();
    } else {
      isolate->thread_manager()->ArchiveThread();
    }
    isolate->thread_manager()->Unlock();
  }
}


Unlocker::Unlocker() {
  internal::Isolate* isolate = internal::Isolate::Current();
  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
  isolate->thread_manager()->ArchiveThread();
  isolate->thread_manager()->Unlock();
}


Unlocker::~Unlocker() {
  // TODO(isolates): check it's the isolate we unlocked.
  internal::Isolate* isolate = internal::Isolate::Current();
  ASSERT(!isolate->thread_manager()->IsLockedByCurrentThread());
  isolate->thread_manager()->Lock();
  isolate->thread_manager()->RestoreThread();
}


void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}
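

// Illustrative sketch (comment only, assuming an embedder that already holds
// a v8::Locker). Preemption makes V8 forcibly switch between the threads that
// share the big lock, instead of relying purely on cooperative Unlocker use:
//
//   v8::Locker locker;
//   v8::Locker::StartPreemption(100);  // Preempt the running thread every 100 ms.
//   // ... run JavaScript from several threads sharing the isolate ...
//   v8::Locker::StopPreemption();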


namespace internal {


bool ThreadManager::RestoreThread() {
  // First check whether the current thread has been 'lazily archived', i.e.,
  // not archived at all. If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
    lazily_archived_thread_ = ThreadId::Invalid();
    ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
           lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access(isolate_);

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  Isolate::PerIsolateThreadData* per_thread =
      Isolate::CurrentPerIsolateThreadData();
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
    // This is a new thread.
    isolate_->stack_guard()->InitThread(access);
    return false;
  }
  ThreadState* state = per_thread->thread_state();
  char* from = state->data();
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
  from = isolate_->RestoreThread(from);
  from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = isolate_->debug()->RestoreDebug(from);
#endif
  from = isolate_->stack_guard()->RestoreStackGuard(from);
  from = isolate_->regexp_stack()->RestoreStack(from);
  from = isolate_->bootstrapper()->RestoreState(from);
  per_thread->set_thread_state(NULL);
  if (state->terminate_on_restore()) {
    isolate_->stack_guard()->TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  state->set_id(ThreadId::Invalid());
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}


void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_ = ThreadId::Current();
  ASSERT(IsLockedByCurrentThread());
}


void ThreadManager::Unlock() {
  mutex_owner_ = ThreadId::Invalid();
  mutex_->Unlock();
}


static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
         Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
         Debug::ArchiveSpacePerThread() +
#endif
         StackGuard::ArchiveSpacePerThread() +
         RegExpStack::ArchiveSpacePerThread() +
         Bootstrapper::ArchiveSpacePerThread() +
         Relocatable::ArchiveSpacePerThread();
}


ThreadState::ThreadState(ThreadManager* thread_manager)
    : id_(ThreadId::Invalid()),
      terminate_on_restore_(false),
      next_(this),
      previous_(this),
      thread_manager_(thread_manager) {
}


void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}


void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? thread_manager_->free_anchor_
                        : thread_manager_->in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}


ThreadState* ThreadManager::GetFreeThreadState() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState(this);
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadManager::FirstThreadStateInUse() {
  return in_use_anchor_->Next();
}


ThreadState* ThreadState::Next() {
  if (next_ == thread_manager_->in_use_anchor_) return NULL;
  return next_;
}


// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0).
ThreadManager::ThreadManager()
    : mutex_(OS::CreateMutex()),
      mutex_owner_(ThreadId::Invalid()),
      lazily_archived_thread_(ThreadId::Invalid()),
      lazily_archived_thread_state_(NULL),
      free_anchor_(NULL),
      in_use_anchor_(NULL) {
  free_anchor_ = new ThreadState(this);
  in_use_anchor_ = new ThreadState(this);
}


ThreadManager::~ThreadManager() {
  // TODO(isolates): Destroy mutexes.
}


void ThreadManager::ArchiveThread() {
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
  ASSERT(!IsArchived());
  ThreadState* state = GetFreeThreadState();
  state->Unlink();
  Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
  lazily_archived_thread_ = ThreadId::Current();
  lazily_archived_thread_state_ = state;
  ASSERT(state->id().Equals(ThreadId::Invalid()));
  state->set_id(CurrentId());
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
}


void ThreadManager::EagerlyArchiveThread() {
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots is archived first, and handled
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
  to = isolate_->ArchiveThread(to);
  to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = isolate_->debug()->ArchiveDebug(to);
#endif
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
  to = isolate_->regexp_stack()->ArchiveStack(to);
  to = isolate_->bootstrapper()->ArchiveState(to);
  lazily_archived_thread_ = ThreadId::Invalid();
  lazily_archived_thread_state_ = NULL;
}


void ThreadManager::FreeThreadResources() {
  isolate_->handle_scope_implementer()->FreeThreadResources();
  isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->FreeThreadResources();
#endif
  isolate_->stack_guard()->FreeThreadResources();
  isolate_->regexp_stack()->FreeThreadResources();
  isolate_->bootstrapper()->FreeThreadResources();
}


bool ThreadManager::IsArchived() {
  Isolate::PerIsolateThreadData* data = Isolate::CurrentPerIsolateThreadData();
  return data != NULL && data->thread_state() != NULL;
}


void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = isolate_->Iterate(v, data);
    data = Relocatable::Iterate(v, data);
  }
}


void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    isolate_->IterateThread(v, data);
  }
}


ThreadId ThreadManager::CurrentId() {
  return ThreadId::Current();
}


void ThreadManager::TerminateExecution(ThreadId thread_id) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    if (thread_id.Equals(state->id())) {
      state->set_terminate_on_restore(true);
    }
  }
}


ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
    : Thread(isolate, "v8:CtxtSwitcher"),
      keep_going_(true),
      sleep_ms_(every_n_ms) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked());
  if (isolate->context_switcher() == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it now.
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
    isolate->context_switcher()->Start();
  } else {
    // ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
  }
}


// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst themselves from this point on.
void ContextSwitcher::StopPreemption() {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked());
  if (isolate->context_switcher() != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    isolate->context_switcher()->keep_going_ = false;
    // Wait for the ContextSwitcher thread to exit.
    isolate->context_switcher()->Join();
    // Thread has exited, now we can delete it.
    delete(isolate->context_switcher());
    isolate->set_context_switcher(NULL);
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    isolate()->stack_guard()->Preempt();
  }
}


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this, but there could be
  // in the future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8