• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_RUNTIME_THREAD_LIST_H_
18 #define ART_RUNTIME_THREAD_LIST_H_
19 
#include <sys/types.h>

#include <bitset>
#include <cstdint>
#include <iosfwd>
#include <list>
#include <memory>
#include <vector>

#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/value_object.h"
#include "jni.h"
#include "reflective_handle_scope.h"
#include "suspend_reason.h"
31 
32 namespace art {
33 namespace gc {
34 namespace collector {
35 class GarbageCollector;
36 }  // namespace collector
37 class GcPauseListener;
38 }  // namespace gc
39 class Closure;
40 class IsMarkedVisitor;
41 class RootVisitor;
42 class Thread;
43 class TimingLogger;
44 enum VisitRootFlags : uint8_t;
45 
46 class ThreadList {
47  public:
48   static constexpr uint32_t kMaxThreadId = 0xFFFF;
49   static constexpr uint32_t kInvalidThreadId = 0;
50   static constexpr uint32_t kMainThreadId = 1;
51   static constexpr uint64_t kDefaultThreadSuspendTimeout =
52       kIsDebugBuild ? 50'000'000'000ull : 10'000'000'000ull;
53 
54   explicit ThreadList(uint64_t thread_suspend_timeout_ns);
55   ~ThreadList();
56 
57   void ShutDown();
58 
59   void DumpForSigQuit(std::ostream& os)
60       REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
61   // For thread suspend timeout dumps.
62   void Dump(std::ostream& os, bool dump_native_stack = true)
63       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
64   pid_t GetLockOwner();  // For SignalCatcher.
65 
66   // Thread suspension support.
67   void ResumeAll()
68       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
69       UNLOCK_FUNCTION(Locks::mutator_lock_);
70   bool Resume(Thread* thread, SuspendReason reason = SuspendReason::kInternal)
71       REQUIRES(!Locks::thread_suspend_count_lock_) WARN_UNUSED;
72 
73   // Suspends all threads and gets exclusive access to the mutator lock.
74   // If long_suspend is true, then other threads who try to suspend will never timeout.
75   // long_suspend is currenly used for hprof since large heaps take a long time.
76   void SuspendAll(const char* cause, bool long_suspend = false)
77       EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
78       REQUIRES(!Locks::thread_list_lock_,
79                !Locks::thread_suspend_count_lock_,
80                !Locks::mutator_lock_);
81 
82   // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
83   // else null. The peer is used to identify the thread to avoid races with the thread terminating.
84   // If the thread should be suspended then value of request_suspension should be true otherwise
85   // the routine will wait for a previous suspend request. If the suspension times out then *timeout
86   // is set to true.
87   Thread* SuspendThreadByPeer(jobject peer,
88                               bool request_suspension,
89                               SuspendReason reason,
90                               bool* timed_out)
91       REQUIRES(!Locks::mutator_lock_,
92                !Locks::thread_list_lock_,
93                !Locks::thread_suspend_count_lock_);
94 
95   // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
96   // thread on success else null. The thread id is used to identify the thread to avoid races with
97   // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
98   // thread, that may be terminating. If the suspension times out then *timeout is set to true.
99   Thread* SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason, bool* timed_out)
100       REQUIRES(!Locks::mutator_lock_,
101                !Locks::thread_list_lock_,
102                !Locks::thread_suspend_count_lock_);
103 
104   // Find an existing thread (or self) by its thread id (not tid).
105   Thread* FindThreadByThreadId(uint32_t thread_id) REQUIRES(Locks::thread_list_lock_);
106 
107   // Find an existing thread (or self) by its tid (not thread id).
108   Thread* FindThreadByTid(int tid) REQUIRES(Locks::thread_list_lock_);
109 
110   // Does the thread list still contain the given thread, or one at the same address?
111   // Used by Monitor to provide (mostly accurate) debugging information.
112   bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);
113 
114   // Run a checkpoint on all threads. Return the total number of threads for which the checkpoint
115   // function has been or will be called.
116   // Running threads are not suspended but run the checkpoint inside of the suspend check. The
117   // return value includes already suspended threads for b/24191051. Runs or requests the
118   // callback, if non-null, inside the thread_list_lock critical section after determining the
119   // runnable/suspended states of the threads. Does not wait for completion of the callbacks in
120   // running threads.
121   size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
122       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
123 
124   // Run an empty checkpoint on threads. Wait until threads pass the next suspend point or are
125   // suspended. This is used to ensure that the threads finish or aren't in the middle of an
126   // in-flight mutator heap access (eg. a read barrier.) Runnable threads will respond by
127   // decrementing the empty checkpoint barrier count. This works even when the weak ref access is
128   // disabled. Only one concurrent use is currently supported.
129   void RunEmptyCheckpoint()
130       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
131 
132   // Flip thread roots from from-space refs to to-space refs. Used by
133   // the concurrent copying collector.
134   size_t FlipThreadRoots(Closure* thread_flip_visitor,
135                          Closure* flip_callback,
136                          gc::collector::GarbageCollector* collector,
137                          gc::GcPauseListener* pause_listener)
138       REQUIRES(!Locks::mutator_lock_,
139                !Locks::thread_list_lock_,
140                !Locks::thread_suspend_count_lock_);
141 
142   // Iterates over all the threads.
143   void ForEach(void (*callback)(Thread*, void*), void* context)
144       REQUIRES(Locks::thread_list_lock_);
145 
146   template<typename CallBack>
ForEach(CallBack cb)147   void ForEach(CallBack cb) REQUIRES(Locks::thread_list_lock_) {
148     ForEach([](Thread* t, void* ctx) REQUIRES(Locks::thread_list_lock_) {
149       (*reinterpret_cast<CallBack*>(ctx))(t);
150     }, &cb);
151   }
152 
153   // Add/remove current thread from list.
154   void Register(Thread* self)
155       REQUIRES(Locks::runtime_shutdown_lock_)
156       REQUIRES(!Locks::mutator_lock_,
157                !Locks::thread_list_lock_,
158                !Locks::thread_suspend_count_lock_);
159   void Unregister(Thread* self)
160       REQUIRES(!Locks::mutator_lock_,
161                !Locks::thread_list_lock_,
162                !Locks::thread_suspend_count_lock_);
163 
164   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const
165       REQUIRES_SHARED(Locks::mutator_lock_);
166 
167   void VisitRootsForSuspendedThreads(RootVisitor* visitor)
168       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
169       REQUIRES_SHARED(Locks::mutator_lock_);
170 
171   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const REQUIRES(Locks::mutator_lock_);
172 
173   // Return a copy of the thread list.
GetList()174   std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
175     return list_;
176   }
177 
178   void DumpNativeStacks(std::ostream& os)
179       REQUIRES(!Locks::thread_list_lock_);
180 
EmptyCheckpointBarrier()181   Barrier* EmptyCheckpointBarrier() {
182     return empty_checkpoint_barrier_.get();
183   }
184 
185   void SweepInterpreterCaches(IsMarkedVisitor* visitor) const
186       REQUIRES(!Locks::thread_list_lock_)
187       REQUIRES_SHARED(Locks::mutator_lock_);
188 
189   void WaitForOtherNonDaemonThreadsToExit(bool check_no_birth = true)
190       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
191                !Locks::mutator_lock_);
192 
193  private:
194   uint32_t AllocThreadId(Thread* self);
195   void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
196 
197   size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended)
198       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
199 
200   void DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
201       REQUIRES(!Locks::thread_list_lock_);
202 
203   void SuspendAllDaemonThreadsForShutdown()
204       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
205 
206   void SuspendAllInternal(Thread* self,
207                           Thread* ignore1,
208                           Thread* ignore2 = nullptr,
209                           SuspendReason reason = SuspendReason::kInternal)
210       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
211 
212   void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
213       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
214 
215   std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
216 
217   // The actual list of all threads.
218   std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);
219 
220   // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll.
221   int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);
222 
223   // Number of threads unregistering, ~ThreadList blocks until this hits 0.
224   int unregistering_count_ GUARDED_BY(Locks::thread_list_lock_);
225 
226   // Thread suspend time histogram. Only modified when all the threads are suspended, so guarding
227   // by mutator lock ensures no thread can read when another thread is modifying it.
228   Histogram<uint64_t> suspend_all_historam_ GUARDED_BY(Locks::mutator_lock_);
229 
230   // Whether or not the current thread suspension is long.
231   bool long_suspend_;
232 
233   // Whether the shutdown function has been called. This is checked in the destructor. It is an
234   // error to destroy a ThreadList instance without first calling ShutDown().
235   bool shut_down_;
236 
237   // Thread suspension timeout in nanoseconds.
238   const uint64_t thread_suspend_timeout_ns_;
239 
240   std::unique_ptr<Barrier> empty_checkpoint_barrier_;
241 
242   friend class Thread;
243 
244   DISALLOW_COPY_AND_ASSIGN(ThreadList);
245 };
246 
247 // Helper for suspending all threads and getting exclusive access to the mutator lock.
248 class ScopedSuspendAll : public ValueObject {
249  public:
250   explicit ScopedSuspendAll(const char* cause, bool long_suspend = false)
251      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
252      REQUIRES(!Locks::thread_list_lock_,
253               !Locks::thread_suspend_count_lock_,
254               !Locks::mutator_lock_);
255   // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
256   ~ScopedSuspendAll()
257       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
258       UNLOCK_FUNCTION(Locks::mutator_lock_);
259 };
260 
261 }  // namespace art
262 
263 #endif  // ART_RUNTIME_THREAD_LIST_H_
264