/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ThreadState_h
#define ThreadState_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "wtf/HashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/ThreadSpecific.h"
#include "wtf/Threading.h"
#include "wtf/ThreadingPrimitives.h"
#include "wtf/Vector.h"

namespace WebCore {

class BaseHeap;
class BaseHeapPage;
class FinalizedHeapObjectHeader;
struct GCInfo;
class HeapContainsCache;
class HeapObjectHeader;
class PersistentNode;
class Visitor;
class SafePointBarrier;
class SafePointAwareMutexLocker;
template<typename Header> class ThreadHeap;
class CallbackStack;

typedef uint8_t* Address;

typedef void (*FinalizationCallback)(void*);
typedef void (*VisitorCallback)(Visitor*, void* self);
typedef VisitorCallback TraceCallback;
typedef VisitorCallback WeakPointerCallback;

// ThreadAffinity indicates which threads objects can be used on. We
// distinguish between objects that can be used on the main thread
// only and objects that can be used on any thread.
//
// For objects that can only be used on the main thread we avoid going
// through thread-local storage to get to the thread state.
//
// FIXME: We should evaluate the performance gain. Having
// ThreadAffinity complicates the implementation and we should get
// rid of it if it is fast enough to always go through thread-local
// storage.
enum ThreadAffinity {
    AnyThread,
    MainThreadOnly,
};

class Node;
class CSSValue;

template<typename T, bool derivesNode = WTF::IsSubclass<typename WTF::RemoveConst<T>::Type, Node>::value> struct DefaultThreadingTrait;

template<typename T>
struct DefaultThreadingTrait<T, false> {
    static const ThreadAffinity Affinity = AnyThread;
};

template<typename T>
struct DefaultThreadingTrait<T, true> {
    static const ThreadAffinity Affinity = MainThreadOnly;
};

template<typename T>
struct ThreadingTrait {
    static const ThreadAffinity Affinity = DefaultThreadingTrait<T>::Affinity;
};

// Marks the specified class as being used from multiple threads. When
// a class is used from multiple threads we go through thread-local
// storage to get the heap in which to allocate an object of that type
// and when allocating a Persistent handle for an object with that
// type. Notice that marking the base class does not automatically
// mark its descendants; they have to be explicitly marked.
#define USED_FROM_MULTIPLE_THREADS(Class)                 \
    class Class;                                          \
    template<> struct ThreadingTrait<Class> {             \
        static const ThreadAffinity Affinity = AnyThread; \
    }

#define USED_FROM_MULTIPLE_THREADS_NAMESPACE(Namespace, Class)          \
    namespace Namespace {                                               \
        class Class;                                                    \
    }                                                                   \
    namespace WebCore {                                                 \
        template<> struct ThreadingTrait<Namespace::Class> {            \
            static const ThreadAffinity Affinity = AnyThread;           \
        };                                                              \
    }

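// Illustrative use (a sketch; the class name is hypothetical). A class in
// another namespace that is used from multiple threads would be marked with
//
//   USED_FROM_MULTIPLE_THREADS_NAMESPACE(WTF, HypotheticalSharedClass);
//
// which forward-declares the class and pins its ThreadAffinity to AnyThread.
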
template<typename U> class ThreadingTrait<const U> : public ThreadingTrait<U> { };

// List of typed heaps. The list is used to generate the implementation
// of typed-heap-related methods.
//
// To create a new typed heap, add an H(<ClassName>) entry to the
// FOR_EACH_TYPED_HEAP macro below.
// FIXME: When the Node hierarchy has been moved, use Node in our
// tests instead of TestTypedHeapClass.
#define FOR_EACH_TYPED_HEAP(H)  \
    H(TestTypedHeapClass)
//    H(Node)

#define TypedHeapEnumName(Type) Type##Heap,

enum TypedHeaps {
    GeneralHeap,
    FOR_EACH_TYPED_HEAP(TypedHeapEnumName)
    NumberOfHeaps
};

// Trait to give an index in the thread state to all the
// type-specialized heaps. The general heap is at index 0 in the
// thread state. The indices for the other type-specialized heaps are
// given by the TypedHeaps enum above.
template<typename T>
struct HeapTrait {
    static const int index = GeneralHeap;
    typedef ThreadHeap<FinalizedHeapObjectHeader> HeapType;
};

#define DEFINE_HEAP_INDEX_TRAIT(Type)                  \
    class Type;                                        \
    template<>                                         \
    struct HeapTrait<class Type> {                     \
        static const int index = Type##Heap;           \
        typedef ThreadHeap<HeapObjectHeader> HeapType; \
    };

FOR_EACH_TYPED_HEAP(DEFINE_HEAP_INDEX_TRAIT)
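
// For example, with the macros above, FOR_EACH_TYPED_HEAP(DEFINE_HEAP_INDEX_TRAIT)
// expands to a specialization equivalent to
//
//   class TestTypedHeapClass;
//   template<>
//   struct HeapTrait<TestTypedHeapClass> {
//       static const int index = TestTypedHeapClassHeap;
//       typedef ThreadHeap<HeapObjectHeader> HeapType;
//   };
//
// so HeapTrait<TestTypedHeapClass>::index selects the typed heap, while all
// other types fall back to GeneralHeap.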

// A HeapStats structure keeps track of the amount of memory allocated
// for a Blink heap and how much of that memory is used for actual
// Blink objects. These stats are used in the heuristics to determine
// when to perform garbage collections.
class HeapStats {
public:
    size_t totalObjectSpace() const { return m_totalObjectSpace; }
    size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; }

    void add(HeapStats* other)
    {
        m_totalObjectSpace += other->m_totalObjectSpace;
        m_totalAllocatedSpace += other->m_totalAllocatedSpace;
    }

    inline void increaseObjectSpace(size_t newObjectSpace)
    {
        m_totalObjectSpace += newObjectSpace;
    }

    inline void decreaseObjectSpace(size_t deadObjectSpace)
    {
        m_totalObjectSpace -= deadObjectSpace;
    }

    inline void increaseAllocatedSpace(size_t newAllocatedSpace)
    {
        m_totalAllocatedSpace += newAllocatedSpace;
    }

    inline void decreaseAllocatedSpace(size_t deadAllocatedSpace)
    {
        m_totalAllocatedSpace -= deadAllocatedSpace;
    }

    void clear()
    {
        m_totalObjectSpace = 0;
        m_totalAllocatedSpace = 0;
    }

    bool operator==(const HeapStats& other) const
    {
        return m_totalAllocatedSpace == other.m_totalAllocatedSpace
            && m_totalObjectSpace == other.m_totalObjectSpace;
    }

private:
    size_t m_totalObjectSpace; // Space occupied by objects that may be live, not including object headers.
    size_t m_totalAllocatedSpace; // Space allocated from the OS.

    friend class HeapTester;
};
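
// Illustrative use (a sketch): a caller can aggregate per-thread stats into a
// single total, e.g. when deciding whether a collection is worthwhile.
//
//   // 'threads' is assumed to come from ThreadState::attachedThreads().
//   HeapStats total;
//   total.clear();
//   for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(); it != threads.end(); ++it)
//       total.add(&(*it)->stats());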

class PLATFORM_EXPORT ThreadState {
    WTF_MAKE_NONCOPYABLE(ThreadState);
public:
    // When garbage collecting we need to know whether or not there
    // can be pointers to Blink GC managed objects on the stack for
    // each thread. When threads reach a safe point they record
    // whether or not they have pointers on the stack.
    enum StackState {
        NoHeapPointersOnStack,
        HeapPointersOnStack
    };

    // The set of ThreadStates for all threads attached to the Blink
    // garbage collector.
    typedef HashSet<ThreadState*> AttachedThreadStateSet;
    static AttachedThreadStateSet& attachedThreads();

    // Initialize threading infrastructure. Should be called from the main
    // thread.
    static void init();
    static void shutdown();
    static void shutdownHeapIfNecessary();

    static void attachMainThread();
    static void detachMainThread();

    // Trace all GC roots, called when marking the managed heap objects.
    static void visitRoots(Visitor*);

    // Associate a ThreadState object with the current thread. After this
    // call the thread can start using the garbage-collected heap
    // infrastructure. It also has to periodically check for safepoints.
    static void attach();

    // Disassociate the attached ThreadState from the current thread. The
    // thread can no longer use the garbage-collected heap after this call.
    static void detach();

    static ThreadState* current() { return **s_threadSpecific; }
    static ThreadState* mainThreadState()
    {
        return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
    }

    bool isMainThread() const { return this == mainThreadState(); }
    inline bool checkThread() const
    {
        ASSERT(m_thread == currentThread());
        return true;
    }

    // shouldGC and shouldForceConservativeGC implement the heuristics
    // that are used to determine when to collect garbage. If
    // shouldForceConservativeGC returns true, we force the garbage
    // collection immediately. Otherwise, if shouldGC returns true, we
    // record that we should garbage collect the next time we return
    // to the event loop. If both return false, we don't need to
    // collect garbage at this point.
    bool shouldGC();
    bool shouldForceConservativeGC();

    // If gcRequested returns true when a thread returns to its event
    // loop, the thread will initiate a garbage collection.
    bool gcRequested();
    void setGCRequested();
    void clearGCRequested();

    // Was the last GC forced for testing? This is set when garbage collection
    // is forced for testing and there are pointers on the stack. It remains
    // set until a garbage collection is triggered with no pointers on the stack.
    // This is used for layout tests that trigger GCs and check if objects are
    // dead at a given point in time. That only reliably works when we get
    // precise GCs with no conservative stack scanning.
    void setForcePreciseGCForTesting(bool);
    bool forcePreciseGCForTesting();

    bool sweepRequested();
    void setSweepRequested();
    void clearSweepRequested();
    void performPendingSweep();

    // Support for disallowing allocation. Mainly used for sanity-check
    // asserts.
    bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; }
    void enterNoAllocationScope() { m_noAllocationCount++; }
    void leaveNoAllocationScope() { m_noAllocationCount--; }
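
    // Illustrative use (a sketch): forbid Blink heap allocation across a
    // region of code so that a stray allocation trips the assert above.
    //
    //   ThreadState* state = ThreadState::current();
    //   state->enterNoAllocationScope();
    //   doWorkThatMustNotAllocate(); // Hypothetical helper.
    //   state->leaveNoAllocationScope();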

    // Before performing GC the thread-specific heap state should be
    // made consistent for garbage collection.
    bool isConsistentForGC();
    void makeConsistentForGC();

    // Is the thread corresponding to this thread state currently
    // performing GC?
    bool isInGC() const { return m_inGC; }

    // Is any of the threads registered with the Blink garbage collection
    // infrastructure currently performing GC?
    static bool isAnyThreadInGC() { return s_inGC; }

    void enterGC()
    {
        ASSERT(!m_inGC);
        ASSERT(!s_inGC);
        m_inGC = true;
        s_inGC = true;
    }

    void leaveGC()
    {
        m_inGC = false;
        s_inGC = false;
    }

    // Is the thread corresponding to this thread state currently
    // sweeping?
    bool isSweepInProgress() const { return m_sweepInProgress; }

    void prepareForGC();

    // Safepoint related functionality.
    //
    // When a thread attempts to perform GC it needs to stop all other threads
    // that use the heap, or at least guarantee that they will not touch any
    // heap allocated object until GC is complete.
    //
    // We say that a thread is at a safepoint if it is guaranteed not to touch
    // any heap allocated object or any heap related functionality until
    // it leaves the safepoint.
    //
    // Notice that a thread does not have to be paused if it is at a safepoint;
    // it can continue to run and perform tasks that do not require interaction
    // with the heap. It will be paused if it attempts to leave the safepoint and
    // there is a GC in progress.
    //
    // Each thread that has a ThreadState attached must:
    //   - periodically check if GC is requested from another thread by calling a safePoint() method
    //     (see the sketch below);
    //   - use SafePointScope around long running loops that have no safePoint() invocation inside,
    //     such loops must not touch any heap object;
    //   - register an Interruptor that can interrupt long running loops that have no calls to safePoint and
    //     are not wrapped in a SafePointScope (e.g. an Interruptor for JavaScript code).
    //

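    // Illustrative sketch of the first requirement (the task queue and its
    // methods are hypothetical): a worker loop that polls for safepoints
    // between tasks.
    //
    //   while (OwnPtr<Task> task = queue.takeNext()) {
    //       task->run();
    //       ThreadState::current()->safePoint(ThreadState::NoHeapPointersOnStack);
    //   }
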
    // Request all other threads to stop. Must only be called if the current thread is at a safepoint.
    static bool stopThreads();
    static void resumeThreads();

    // Check if GC is requested by another thread and pause this thread if this is the case.
    // Can only be called when the current thread is in a consistent state.
    void safePoint(StackState);

    // Mark the current thread as running inside a safepoint.
    void enterSafePointWithoutPointers() { enterSafePoint(NoHeapPointersOnStack, 0); }
    void enterSafePointWithPointers(void* scopeMarker) { enterSafePoint(HeapPointersOnStack, scopeMarker); }
    void leaveSafePoint(SafePointAwareMutexLocker* = 0);
    bool isAtSafePoint() const { return m_atSafePoint; }

    class SafePointScope {
    public:
        enum ScopeNesting {
            NoNesting,
            AllowNesting
        };

        explicit SafePointScope(StackState stackState, ScopeNesting nesting = NoNesting)
            : m_state(ThreadState::current())
        {
            if (m_state->isAtSafePoint()) {
                RELEASE_ASSERT(nesting == AllowNesting);
                // We can ignore stackState because there should be no
                // manipulation of heap object pointers after the outermost
                // safepoint was entered.
                m_state = 0;
            } else {
                m_state->enterSafePoint(stackState, this);
            }
        }

        ~SafePointScope()
        {
            if (m_state)
                m_state->leaveSafePoint();
        }

    private:
        ThreadState* m_state;
    };
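
    // Illustrative use (a sketch; the condition/mutex are hypothetical): wrap
    // a long blocking wait in a SafePointScope so GCs can proceed while this
    // thread is parked. The code inside the scope must not touch heap objects.
    //
    //   {
    //       ThreadState::SafePointScope scope(ThreadState::NoHeapPointersOnStack);
    //       condition.wait(mutex);
    //   }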

    // If an attached thread enters a long running loop that can call back
    // into Blink, and leaving and reentering a safepoint at every
    // transition between this loop and Blink is deemed too expensive,
    // then instead of marking this loop as a GC safepoint the thread
    // can provide an interruptor object which allows the GC
    // to temporarily interrupt and pause this long running loop at
    // an arbitrary moment, creating a safepoint for a GC.
    class PLATFORM_EXPORT Interruptor {
    public:
        virtual ~Interruptor() { }

        // Request the interruptor to interrupt the thread and
        // call onInterrupted on that thread once interruption
        // succeeds.
        virtual void requestInterrupt() = 0;

        // Clear previous interrupt request.
        virtual void clearInterrupt() = 0;

    protected:
        // This method is called on the interrupted thread to
        // create a safepoint for a GC.
        void onInterrupted();
    };

    void addInterruptor(Interruptor*);
    void removeInterruptor(Interruptor*);
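
    // Illustrative sketch (the VM hooks are hypothetical): an Interruptor for
    // a script-execution loop would ask the VM to break out at a safe spot and
    // then call onInterrupted() on the interrupted thread.
    //
    //   class ScriptInterruptor : public ThreadState::Interruptor {
    //   public:
    //       virtual void requestInterrupt() { m_vm->requestInterrupt(); }
    //       virtual void clearInterrupt() { m_vm->clearInterrupt(); }
    //       // The VM's interrupt handler, running on the interrupted
    //       // thread, calls onInterrupted() to create the safepoint.
    //   };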

    // CleanupTasks are executed when ThreadState performs
    // cleanup before detaching.
    class CleanupTask {
    public:
        virtual ~CleanupTask() { }

        // Executed before the final GC.
        virtual void preCleanup() { }

        // Executed after the final GC. Thread heap is empty at this point.
        virtual void postCleanup() { }
    };

    void addCleanupTask(PassOwnPtr<CleanupTask> cleanupTask)
    {
        m_cleanupTasks.append(cleanupTask);
    }
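
    // Illustrative use (a sketch; the task class is hypothetical):
    //
    //   class ClearCachesTask : public ThreadState::CleanupTask {
    //       virtual void postCleanup() { /* release per-thread caches */ }
    //   };
    //   ThreadState::current()->addCleanupTask(adoptPtr(new ClearCachesTask));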

    // Should only be called under protection of threadAttachMutex().
    const Vector<Interruptor*>& interruptors() const { return m_interruptors; }

    void recordStackEnd(intptr_t* endOfStack)
    {
        m_endOfStack = endOfStack;
    }

    // Get one of the heap structures for this thread.
    //
    // The heap is split into multiple heap parts based on object
    // types. To get the index for a given type, use
    // HeapTrait<Type>::index.
    BaseHeap* heap(int index) const { return m_heaps[index]; }
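
    // For example (a sketch), the heap in which objects of type T live is
    //
    //   BaseHeap* heapForT = state->heap(HeapTrait<T>::index);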

    // Infrastructure to determine if an address is within one of the
    // address ranges for the Blink heap. If the address is in the Blink
    // heap the containing heap page is returned.
    HeapContainsCache* heapContainsCache() { return m_heapContainsCache.get(); }
    BaseHeapPage* contains(Address address) { return heapPageFromAddress(address); }
    BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }

    // List of persistent roots allocated on the given thread.
    PersistentNode* roots() const { return m_persistents.get(); }

    // List of global persistent roots not owned by any particular thread.
    // globalRootsMutex must be acquired before any modifications.
    static PersistentNode* globalRoots();
    static Mutex& globalRootsMutex();

    // Visit the local thread stack and trace all pointers conservatively.
    void visitStack(Visitor*);

    // Visit the ASan fake stack frame corresponding to a slot on the
    // real machine stack if there is one.
    void visitAsanFakeStackForPointer(Visitor*, Address);

    // Visit all persistents allocated on this thread.
    void visitPersistents(Visitor*);

    // Checks a given address and, if it is a pointer into the Oilpan heap,
    // marks the object to which it points.
    bool checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_TRACING)
    const GCInfo* findGCInfo(Address);
    static const GCInfo* findGCInfoFromAllThreads(Address);
#endif

    void pushWeakObjectPointerCallback(void*, WeakPointerCallback);
    bool popAndInvokeWeakPointerCallback(Visitor*);

    void getStats(HeapStats&);
    HeapStats& stats() { return m_stats; }
    HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; }

private:
    explicit ThreadState();
    ~ThreadState();

    friend class SafePointBarrier;
    friend class SafePointAwareMutexLocker;

    void enterSafePoint(StackState, void*);
    NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
    void clearSafePointScopeMarker()
    {
        m_safePointStackCopy.clear();
        m_safePointScopeMarker = 0;
    }

    void performPendingGC(StackState);

    // Finds the Blink HeapPage in this thread-specific heap
    // corresponding to a given address. Returns 0 if the address is
    // not contained in any of the pages. This does not consider
    // large objects.
    BaseHeapPage* heapPageFromAddress(Address);

    // When a ThreadState is detaching from a non-main thread its
    // heap is expected to be empty (because it is going away).
    // Perform registered cleanup tasks and a garbage collection
    // to sweep away any objects that are left on this heap.
    // We assert that nothing remains after this cleanup;
    // if the assertion does not hold we crash, as we are potentially
    // in a dangling pointer situation.
    void cleanup();
    void preCleanup();
    void postCleanup();

    static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
    static SafePointBarrier* s_safePointBarrier;

    // This variable is flipped to true after all threads are stopped
    // and the outermost GC has started.
    static bool s_inGC;

    // We can't create a static member of type ThreadState here
    // because it would introduce a global constructor and destructor.
    // We would like to manage the lifetime of the ThreadState attached
    // to the main thread explicitly instead, and still use the normal
    // constructor and destructor for the ThreadState class.
    // For this we reserve static storage for the main ThreadState
    // and lazily construct the ThreadState in it using placement new.
    static uint8_t s_mainThreadStateStorage[];

    void trace(Visitor*);

    ThreadIdentifier m_thread;
    OwnPtr<PersistentNode> m_persistents;
    StackState m_stackState;
    intptr_t* m_startOfStack;
    intptr_t* m_endOfStack;
    void* m_safePointScopeMarker;
    Vector<Address> m_safePointStackCopy;
    bool m_atSafePoint;
    Vector<Interruptor*> m_interruptors;
    bool m_gcRequested;
    bool m_forcePreciseGCForTesting;
    volatile int m_sweepRequested;
    bool m_sweepInProgress;
    size_t m_noAllocationCount;
    bool m_inGC;
    BaseHeap* m_heaps[NumberOfHeaps];
    OwnPtr<HeapContainsCache> m_heapContainsCache;
    HeapStats m_stats;
    HeapStats m_statsAfterLastGC;

    Vector<OwnPtr<CleanupTask> > m_cleanupTasks;
    bool m_isCleaningUp;

    CallbackStack* m_weakCallbackStack;

#if defined(ADDRESS_SANITIZER)
    void* m_asanFakeStack;
#endif
};

template<ThreadAffinity affinity> class ThreadStateFor;

template<> class ThreadStateFor<MainThreadOnly> {
public:
    static ThreadState* state()
    {
        // This specialization must only be used from the main thread.
        ASSERT(ThreadState::current()->isMainThread());
        return ThreadState::mainThreadState();
    }
};

template<> class ThreadStateFor<AnyThread> {
public:
    static ThreadState* state() { return ThreadState::current(); }
};
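
// Callers typically combine ThreadStateFor with ThreadingTrait so that
// main-thread-only types skip the thread-local-storage lookup. For example
// (a sketch, for some garbage-collected type T):
//
//   ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();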

// The SafePointAwareMutexLocker is used to enter a safepoint while waiting for
// a mutex lock. It also ensures that the lock is not held while waiting for a GC
// to complete in the leaveSafePoint method, by releasing the lock if the
// leaveSafePoint method cannot complete without blocking; see
// SafePointBarrier::checkAndPark.
class SafePointAwareMutexLocker {
    WTF_MAKE_NONCOPYABLE(SafePointAwareMutexLocker);
public:
    explicit SafePointAwareMutexLocker(Mutex& mutex) : m_mutex(mutex), m_locked(false)
    {
        ThreadState* state = ThreadState::current();
        do {
            bool leaveSafePoint = false;
            if (!state->isAtSafePoint()) {
                state->enterSafePoint(ThreadState::HeapPointersOnStack, this);
                leaveSafePoint = true;
            }
            m_mutex.lock();
            m_locked = true;
            if (leaveSafePoint) {
                // When leaving the safepoint we might end up releasing the
                // mutex if another thread is requesting a GC, see
                // SafePointBarrier::checkAndPark. This is the case where we
                // loop around to reacquire the lock.
                state->leaveSafePoint(this);
            }
        } while (!m_locked);
    }

    ~SafePointAwareMutexLocker()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
    }

private:
    friend class SafePointBarrier;

    void reset()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
        m_locked = false;
    }

    Mutex& m_mutex;
    bool m_locked;
};
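
// Illustrative use (a sketch): take a mutex shared with other attached
// threads, e.g. the global roots mutex, without stalling a concurrent GC.
//
//   {
//       SafePointAwareMutexLocker locker(ThreadState::globalRootsMutex());
//       // ... mutate the global persistent root list ...
//   }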

}

#endif // ThreadState_h