1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "thread.h"
18
19 #include <limits.h> // for INT_MAX
20 #include <pthread.h>
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <sys/resource.h>
24 #include <sys/time.h>
25
26 #include <algorithm>
27 #include <atomic>
28 #include <bitset>
29 #include <cerrno>
30 #include <iostream>
31 #include <list>
32 #include <optional>
33 #include <sstream>
34
35 #include "android-base/file.h"
36 #include "android-base/stringprintf.h"
37 #include "android-base/strings.h"
38
39 #include "unwindstack/AndroidUnwinder.h"
40
41 #include "arch/context-inl.h"
42 #include "arch/context.h"
43 #include "art_field-inl.h"
44 #include "art_method-inl.h"
45 #include "base/atomic.h"
46 #include "base/bit_utils.h"
47 #include "base/casts.h"
48 #include "base/file_utils.h"
49 #include "base/memory_tool.h"
50 #include "base/mutex.h"
51 #include "base/stl_util.h"
52 #include "base/systrace.h"
53 #include "base/time_utils.h"
54 #include "base/timing_logger.h"
55 #include "base/to_str.h"
56 #include "base/utils.h"
57 #include "class_linker-inl.h"
58 #include "class_root-inl.h"
59 #include "debugger.h"
60 #include "dex/descriptors_names.h"
61 #include "dex/dex_file-inl.h"
62 #include "dex/dex_file_annotations.h"
63 #include "dex/dex_file_types.h"
64 #include "entrypoints/entrypoint_utils.h"
65 #include "entrypoints/quick/quick_alloc_entrypoints.h"
66 #include "gc/accounting/card_table-inl.h"
67 #include "gc/accounting/heap_bitmap-inl.h"
68 #include "gc/allocator/rosalloc.h"
69 #include "gc/heap.h"
70 #include "gc/space/space-inl.h"
71 #include "gc_root.h"
72 #include "handle_scope-inl.h"
73 #include "indirect_reference_table-inl.h"
74 #include "instrumentation.h"
75 #include "intern_table.h"
76 #include "interpreter/interpreter.h"
77 #include "interpreter/shadow_frame-inl.h"
78 #include "java_frame_root_info.h"
79 #include "jni/java_vm_ext.h"
80 #include "jni/jni_internal.h"
81 #include "mirror/class-alloc-inl.h"
82 #include "mirror/class_loader.h"
83 #include "mirror/object_array-alloc-inl.h"
84 #include "mirror/object_array-inl.h"
85 #include "mirror/stack_frame_info.h"
86 #include "mirror/stack_trace_element.h"
87 #include "monitor.h"
88 #include "monitor_objects_stack_visitor.h"
89 #include "native_stack_dump.h"
90 #include "nativehelper/scoped_local_ref.h"
91 #include "nativehelper/scoped_utf_chars.h"
92 #include "nterp_helpers.h"
93 #include "nth_caller_visitor.h"
94 #include "oat/oat_quick_method_header.h"
95 #include "oat/stack_map.h"
96 #include "obj_ptr-inl.h"
97 #include "object_lock.h"
98 #include "palette/palette.h"
99 #include "quick/quick_method_frame_info.h"
100 #include "quick_exception_handler.h"
101 #include "read_barrier-inl.h"
102 #include "reflection.h"
103 #include "reflective_handle_scope-inl.h"
104 #include "runtime-inl.h"
105 #include "runtime.h"
106 #include "runtime_callbacks.h"
107 #include "scoped_thread_state_change-inl.h"
108 #include "scoped_disable_public_sdk_checker.h"
109 #include "stack.h"
110 #include "thread-inl.h"
111 #include "thread_list.h"
112 #include "trace.h"
113 #include "verify_object.h"
114 #include "well_known_classes-inl.h"
115
116 #ifdef ART_TARGET_ANDROID
117 #include <android/set_abort_message.h>
118 #endif
119
120 #if ART_USE_FUTEXES
121 #include <linux/futex.h>
122 #include <sys/syscall.h>
123 #endif // ART_USE_FUTEXES
124
125 #pragma clang diagnostic push
126 #pragma clang diagnostic error "-Wconversion"
127
128 extern "C" __attribute__((weak)) void* __hwasan_tag_pointer(const volatile void* p,
129 unsigned char tag);
130
131 namespace art HIDDEN {
132
133 using android::base::StringAppendV;
134 using android::base::StringPrintf;
135
136 extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
137
138 bool Thread::is_started_ = false;
139 pthread_key_t Thread::pthread_key_self_;
140 ConditionVariable* Thread::resume_cond_ = nullptr;
141 const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
142 bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
143 Thread* Thread::jit_sensitive_thread_ = nullptr;
144 std::atomic<Mutex*> Thread::cp_placeholder_mutex_(nullptr);
145 #ifndef __BIONIC__
146 thread_local Thread* Thread::self_tls_ = nullptr;
147 #endif
148
149 static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
150
151 static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
152
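// Cache the heap's card table biased-begin pointer in this thread's TLS so card
// marking does not have to go through Runtime::Current() on every write barrier.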
153 void Thread::InitCardTable() {
154 tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
155 }
156
157 static void UnimplementedEntryPoint() {
158 UNIMPLEMENTED(FATAL);
159 }
160
161 void InitEntryPoints(JniEntryPoints* jpoints,
162 QuickEntryPoints* qpoints,
163 bool monitor_jni_entry_exit);
164 void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
165
166 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
167 CHECK(gUseReadBarrier);
168 tls32_.is_gc_marking = is_marking;
169 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
170 }
171
172 void Thread::InitTlsEntryPoints() {
173 ScopedTrace trace("InitTlsEntryPoints");
174 // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
175 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
176 uintptr_t* end = reinterpret_cast<uintptr_t*>(
177 reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
178 for (uintptr_t* it = begin; it != end; ++it) {
179 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
180 }
181 bool monitor_jni_entry_exit = false;
182 PaletteShouldReportJniInvocations(&monitor_jni_entry_exit);
183 if (monitor_jni_entry_exit) {
184 AtomicSetFlag(ThreadFlag::kMonitorJniEntryExit);
185 }
186 InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints, monitor_jni_entry_exit);
187 }
188
189 void Thread::ResetQuickAllocEntryPointsForThread() {
190 ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
191 }
192
193 class DeoptimizationContextRecord {
194 public:
195 DeoptimizationContextRecord(const JValue& ret_val,
196 bool is_reference,
197 bool from_code,
198 ObjPtr<mirror::Throwable> pending_exception,
199 DeoptimizationMethodType method_type,
200 DeoptimizationContextRecord* link)
201 : ret_val_(ret_val),
202 is_reference_(is_reference),
203 from_code_(from_code),
204 pending_exception_(pending_exception.Ptr()),
205 deopt_method_type_(method_type),
206 link_(link) {}
207
208 JValue GetReturnValue() const { return ret_val_; }
209 bool IsReference() const { return is_reference_; }
210 bool GetFromCode() const { return from_code_; }
211 ObjPtr<mirror::Throwable> GetPendingException() const REQUIRES_SHARED(Locks::mutator_lock_) {
212 return pending_exception_;
213 }
214 DeoptimizationContextRecord* GetLink() const { return link_; }
215 mirror::Object** GetReturnValueAsGCRoot() {
216 DCHECK(is_reference_);
217 return ret_val_.GetGCRoot();
218 }
219 mirror::Object** GetPendingExceptionAsGCRoot() {
220 return reinterpret_cast<mirror::Object**>(&pending_exception_);
221 }
222 DeoptimizationMethodType GetDeoptimizationMethodType() const {
223 return deopt_method_type_;
224 }
225
226 private:
227 // The value returned by the method at the top of the stack before deoptimization.
228 JValue ret_val_;
229
230 // Indicates whether the returned value is a reference. If so, the GC will visit it.
231 const bool is_reference_;
232
233 // Whether the context was created from an explicit deoptimization in the code.
234 const bool from_code_;
235
236 // The exception that was pending before deoptimization (or null if there was no pending
237 // exception).
238 mirror::Throwable* pending_exception_;
239
240 // Whether the context was created for an (idempotent) runtime method.
241 const DeoptimizationMethodType deopt_method_type_;
242
243 // A link to the previous DeoptimizationContextRecord.
244 DeoptimizationContextRecord* const link_;
245
246 DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
247 };
248
249 class StackedShadowFrameRecord {
250 public:
251 StackedShadowFrameRecord(ShadowFrame* shadow_frame,
252 StackedShadowFrameType type,
253 StackedShadowFrameRecord* link)
254 : shadow_frame_(shadow_frame),
255 type_(type),
256 link_(link) {}
257
258 ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
259 StackedShadowFrameType GetType() const { return type_; }
260 StackedShadowFrameRecord* GetLink() const { return link_; }
261
262 private:
263 ShadowFrame* const shadow_frame_;
264 const StackedShadowFrameType type_;
265 StackedShadowFrameRecord* const link_;
266
267 DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
268 };
269
270 void Thread::PushDeoptimizationContext(const JValue& return_value,
271 bool is_reference,
272 ObjPtr<mirror::Throwable> exception,
273 bool from_code,
274 DeoptimizationMethodType method_type) {
275 DCHECK(exception != Thread::GetDeoptimizationException());
276 DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
277 return_value,
278 is_reference,
279 from_code,
280 exception,
281 method_type,
282 tlsPtr_.deoptimization_context_stack);
283 tlsPtr_.deoptimization_context_stack = record;
284 }
285
286 void Thread::PopDeoptimizationContext(JValue* result,
287 ObjPtr<mirror::Throwable>* exception,
288 bool* from_code,
289 DeoptimizationMethodType* method_type) {
290 AssertHasDeoptimizationContext();
291 DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
292 tlsPtr_.deoptimization_context_stack = record->GetLink();
293 result->SetJ(record->GetReturnValue().GetJ());
294 *exception = record->GetPendingException();
295 *from_code = record->GetFromCode();
296 *method_type = record->GetDeoptimizationMethodType();
297 delete record;
298 }
299
300 void Thread::AssertHasDeoptimizationContext() {
301 CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
302 << "No deoptimization context for thread " << *this;
303 }
304
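// Permit states for tls32_.park_state_, used by Park()/Unpark() to implement
// LockSupport.park/unpark semantics:
//   Unpark():               any state        -> kPermitAvailable (wakes a waiter if one is parked)
//   Park() with permit:     kPermitAvailable -> kNoPermit, returns immediately
//   Park() without permit:  kNoPermit        -> kNoPermitWaiterWaiting, futex-waits,
//                           then stores kNoPermit on wakeup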
305 enum {
306 kPermitAvailable = 0, // Incrementing consumes the permit
307 kNoPermit = 1, // Incrementing marks as waiter waiting
308 kNoPermitWaiterWaiting = 2
309 };
310
311 void Thread::Park(bool is_absolute, int64_t time) {
312 DCHECK(this == Thread::Current());
313 #if ART_USE_FUTEXES
314 // Consume the permit, or mark as waiting. This cannot cause park_state to go
315 // outside of its valid range (0, 1, 2), because in all cases where 2 is
316 // assigned it is set back to 1 before returning, and this method cannot run
317 // concurrently with itself since it operates on the current thread.
318 int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
319 if (old_state == kNoPermit) {
320 // no permit was available. block thread until later.
321 Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
322 bool timed_out = false;
323 if (!is_absolute && time == 0) {
324 // Thread.getState() is documented to return waiting for untimed parks.
325 ScopedThreadSuspension sts(this, ThreadState::kWaiting);
326 DCHECK_EQ(NumberOfHeldMutexes(), 0u);
327 int result = futex(tls32_.park_state_.Address(),
328 FUTEX_WAIT_PRIVATE,
329 /* sleep if val = */ kNoPermitWaiterWaiting,
330 /* timeout */ nullptr,
331 nullptr,
332 0);
333 // This errno check must happen before the scope is closed, to ensure that
334 // no destructors (such as ScopedThreadSuspension) overwrite errno.
335 if (result == -1) {
336 switch (errno) {
337 case EAGAIN:
338 FALLTHROUGH_INTENDED;
339 case EINTR: break; // park() is allowed to spuriously return
340 default: PLOG(FATAL) << "Failed to park";
341 }
342 }
343 } else if (time > 0) {
344 // Only actually suspend and futex_wait if we're going to wait for some
345 // positive amount of time - the kernel will reject negative times with
346 // EINVAL, and a zero time will just noop.
347
348 // Thread.getState() is documented to return timed wait for timed parks.
349 ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
350 DCHECK_EQ(NumberOfHeldMutexes(), 0u);
351 timespec timespec;
352 int result = 0;
353 if (is_absolute) {
354 // Time is millis when scheduled for an absolute time
355 timespec.tv_nsec = (time % 1000) * 1000000;
356 timespec.tv_sec = SaturatedTimeT(time / 1000);
357 // This odd looking pattern is recommended by futex documentation to
358 // wait until an absolute deadline, with otherwise identical behavior to
359 // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
360 // correct time when the system clock changes.
361 result = futex(tls32_.park_state_.Address(),
362 FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
363 /* sleep if val = */ kNoPermitWaiterWaiting,
364 &timespec,
365 nullptr,
366 static_cast<int>(FUTEX_BITSET_MATCH_ANY));
367 } else {
368 // Time is nanos when scheduled for a relative time
369 timespec.tv_sec = SaturatedTimeT(time / 1000000000);
370 timespec.tv_nsec = time % 1000000000;
371 result = futex(tls32_.park_state_.Address(),
372 FUTEX_WAIT_PRIVATE,
373 /* sleep if val = */ kNoPermitWaiterWaiting,
374 &timespec,
375 nullptr,
376 0);
377 }
378 // This errno check must happen before the scope is closed, to ensure that
379 // no destructors (such as ScopedThreadSuspension) overwrite errno.
380 if (result == -1) {
381 switch (errno) {
382 case ETIMEDOUT:
383 timed_out = true;
384 FALLTHROUGH_INTENDED;
385 case EAGAIN:
386 case EINTR: break; // park() is allowed to spuriously return
387 default: PLOG(FATAL) << "Failed to park";
388 }
389 }
390 }
391 // Mark as no longer waiting, and consume permit if there is one.
392 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
393 // TODO: Call to signal jvmti here
394 Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
395 } else {
396 // the fetch_add has consumed the permit. immediately return.
397 DCHECK_EQ(old_state, kPermitAvailable);
398 }
399 #else
400 #pragma clang diagnostic push
401 #pragma clang diagnostic warning "-W#warnings"
402 #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
403 #pragma clang diagnostic pop
404 UNUSED(is_absolute, time);
405 UNIMPLEMENTED(WARNING);
406 sched_yield();
407 #endif
408 }
409
410 void Thread::Unpark() {
411 #if ART_USE_FUTEXES
412 // Set permit available; will be consumed either by fetch_add (when the thread
413 // tries to park) or store (when the parked thread is woken up)
414 if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
415 == kNoPermitWaiterWaiting) {
416 int result = futex(tls32_.park_state_.Address(),
417 FUTEX_WAKE_PRIVATE,
418 /* number of waiters = */ 1,
419 nullptr,
420 nullptr,
421 0);
422 if (result == -1) {
423 PLOG(FATAL) << "Failed to unpark";
424 }
425 }
426 #else
427 UNIMPLEMENTED(WARNING);
428 #endif
429 }
430
431 void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
432 StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
433 sf, type, tlsPtr_.stacked_shadow_frame_record);
434 tlsPtr_.stacked_shadow_frame_record = record;
435 }
436
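// Pop and return the top stacked shadow frame only if it was pushed for
// deoptimization; otherwise leave the stack untouched and return null.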
437 ShadowFrame* Thread::MaybePopDeoptimizedStackedShadowFrame() {
438 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
439 if (record == nullptr ||
440 record->GetType() != StackedShadowFrameType::kDeoptimizationShadowFrame) {
441 return nullptr;
442 }
443 return PopStackedShadowFrame();
444 }
445
446 ShadowFrame* Thread::PopStackedShadowFrame() {
447 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
448 DCHECK_NE(record, nullptr);
449 tlsPtr_.stacked_shadow_frame_record = record->GetLink();
450 ShadowFrame* shadow_frame = record->GetShadowFrame();
451 delete record;
452 return shadow_frame;
453 }
454
455 class FrameIdToShadowFrame {
456 public:
457 static FrameIdToShadowFrame* Create(size_t frame_id,
458 ShadowFrame* shadow_frame,
459 FrameIdToShadowFrame* next,
460 size_t num_vregs) {
461 // Append a bool array at the end to keep track of what vregs are updated by the debugger.
462 uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
463 return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
464 }
465
466 static void Delete(FrameIdToShadowFrame* f) {
467 uint8_t* memory = reinterpret_cast<uint8_t*>(f);
468 delete[] memory;
469 }
470
471 size_t GetFrameId() const { return frame_id_; }
472 ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
473 FrameIdToShadowFrame* GetNext() const { return next_; }
474 void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
475 bool* GetUpdatedVRegFlags() {
476 return updated_vreg_flags_;
477 }
478
479 private:
480 FrameIdToShadowFrame(size_t frame_id,
481 ShadowFrame* shadow_frame,
482 FrameIdToShadowFrame* next)
483 : frame_id_(frame_id),
484 shadow_frame_(shadow_frame),
485 next_(next) {}
486
487 const size_t frame_id_;
488 ShadowFrame* const shadow_frame_;
489 FrameIdToShadowFrame* next_;
490 bool updated_vreg_flags_[0];
491
492 DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
493 };
494
495 static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
496 size_t frame_id) {
497 FrameIdToShadowFrame* found = nullptr;
498 for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
499 if (record->GetFrameId() == frame_id) {
500 if (kIsDebugBuild) {
501 // Check we have at most one record for this frame.
502 CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
503 found = record;
504 } else {
505 return record;
506 }
507 }
508 }
509 return found;
510 }
511
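// Return the debugger shadow frame registered for the given frame id, or null if
// no mapping exists.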
512 ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
513 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
514 tlsPtr_.frame_id_to_shadow_frame, frame_id);
515 if (record != nullptr) {
516 return record->GetShadowFrame();
517 }
518 return nullptr;
519 }
520
521 // Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
522 bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
523 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
524 tlsPtr_.frame_id_to_shadow_frame, frame_id);
525 CHECK(record != nullptr);
526 return record->GetUpdatedVRegFlags();
527 }
528
529 ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
530 uint32_t num_vregs,
531 ArtMethod* method,
532 uint32_t dex_pc) {
533 ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
534 if (shadow_frame != nullptr) {
535 return shadow_frame;
536 }
537 VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
538 shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc);
539 FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
540 shadow_frame,
541 tlsPtr_.frame_id_to_shadow_frame,
542 num_vregs);
543 for (uint32_t i = 0; i < num_vregs; i++) {
544 // Do this to clear all references for root visitors.
545 shadow_frame->SetVRegReference(i, nullptr);
546 // This flag will be changed to true if the debugger modifies the value.
547 record->GetUpdatedVRegFlags()[i] = false;
548 }
549 tlsPtr_.frame_id_to_shadow_frame = record;
550 return shadow_frame;
551 }
552
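// Look up the custom TLS entry registered under |key|, returning null if no data
// has been attached for that key.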
553 TLSData* Thread::GetCustomTLS(const char* key) {
554 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
555 auto it = custom_tls_.find(key);
556 return (it != custom_tls_.end()) ? it->second.get() : nullptr;
557 }
558
559 void Thread::SetCustomTLS(const char* key, TLSData* data) {
560 // We will swap the old data (which might be nullptr) with this and then delete it outside of the
561 // custom_tls_lock_.
562 std::unique_ptr<TLSData> old_data(data);
563 {
564 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
565 custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data);
566 }
567 }
568
569 void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
570 FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
571 if (head->GetFrameId() == frame_id) {
572 tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
573 FrameIdToShadowFrame::Delete(head);
574 return;
575 }
576 FrameIdToShadowFrame* prev = head;
577 for (FrameIdToShadowFrame* record = head->GetNext();
578 record != nullptr;
579 prev = record, record = record->GetNext()) {
580 if (record->GetFrameId() == frame_id) {
581 prev->SetNext(record->GetNext());
582 FrameIdToShadowFrame::Delete(record);
583 return;
584 }
585 }
586 LOG(FATAL) << "No shadow frame for frame " << frame_id;
587 UNREACHABLE();
588 }
589
590 void Thread::InitTid() {
591 tls32_.tid = ::art::GetTid();
592 }
593
594 void Thread::InitAfterFork() {
595 // One thread (us) survived the fork, but we have a new tid so we need to
596 // update the value stashed in this Thread*.
597 InitTid();
598 }
599
600 void Thread::DeleteJPeer(JNIEnv* env) {
601 // Make sure nothing can observe both opeer and jpeer set at the same time.
602 jobject old_jpeer = tlsPtr_.jpeer;
603 CHECK(old_jpeer != nullptr);
604 tlsPtr_.jpeer = nullptr;
605 env->DeleteGlobalRef(old_jpeer);
606 }
607
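// Alternate pthread entry point selected in CreateNativeThread when the
// userfaultfd GC is in use; it simply forwards to CreateCallback.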
608 void* Thread::CreateCallbackWithUffdGc(void* arg) {
609 return Thread::CreateCallback(arg);
610 }
611
612 void* Thread::CreateCallback(void* arg) {
613 Thread* self = reinterpret_cast<Thread*>(arg);
614 Runtime* runtime = Runtime::Current();
615 if (runtime == nullptr) {
616 LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
617 return nullptr;
618 }
619 {
620 // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
621 // after self->Init().
622 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
623 // Check that if we got here we cannot be shutting down (as shutdown should never have started
624 // while threads are being born).
625 CHECK(!runtime->IsShuttingDownLocked());
626 // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
627 // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
628 // the runtime in such a case. In case this ever changes, we need to make sure here to
629 // delete the tmp_jni_env, as we own it at this point.
630 CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
631 self->tlsPtr_.tmp_jni_env = nullptr;
632 Runtime::Current()->EndThreadBirth();
633 }
634 {
635 ScopedObjectAccess soa(self);
636 self->InitStringEntryPoints();
637
638 // Copy peer into self, deleting global reference when done.
639 CHECK(self->tlsPtr_.jpeer != nullptr);
640 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
641 // Make sure nothing can observe both opeer and jpeer set at the same time.
642 self->DeleteJPeer(self->GetJniEnv());
643 self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
644
645 ArtField* priorityField = WellKnownClasses::java_lang_Thread_priority;
646 self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
647
648 runtime->GetRuntimeCallbacks()->ThreadStart(self);
649
650 // Unpark ourselves if the java peer was unparked before it started (see
651 // b/28845097#comment49 for more information)
652
653 ArtField* unparkedField = WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
654 bool should_unpark = false;
655 {
656 // Hold the lock here, so that if another thread calls unpark before the thread starts
657 // we don't observe the unparkedBeforeStart field before the unparker writes to it,
658 // which could cause a lost unpark.
659 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
660 should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
661 }
662 if (should_unpark) {
663 self->Unpark();
664 }
665 // Invoke the 'run' method of our java.lang.Thread.
666 ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
667 WellKnownClasses::java_lang_Thread_run->InvokeVirtual<'V'>(self, receiver);
668 }
669 // Detach and delete self.
670 Runtime::Current()->GetThreadList()->Unregister(self, /* should_run_callbacks= */ true);
671
672 return nullptr;
673 }
674
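// Recover the native Thread* stored in the java.lang.Thread.nativePeer field of
// the given managed peer.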
675 Thread* Thread::FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer) {
676 ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
677 Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
678 // Check that if we have a result it is either suspended or we hold the thread_list_lock_
679 // to stop it from going away.
680 if (kIsDebugBuild) {
681 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
682 if (result != nullptr && !result->IsSuspended()) {
683 Locks::thread_list_lock_->AssertHeld(self);
684 }
685 }
686 return result;
687 }
688
689 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
690 jobject java_thread) {
691 return FromManagedThread(soa.Self(), soa.Decode<mirror::Object>(java_thread));
692 }
693
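// Adjust a requested stack size: apply the runtime default, add headroom for
// sanitizers and ART's reserved overflow region, respect PTHREAD_STACK_MIN, and
// round up to a multiple of the page size.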
694 static size_t FixStackSize(size_t stack_size) {
695 // A stack size of zero means "use the default".
696 if (stack_size == 0) {
697 stack_size = Runtime::Current()->GetDefaultStackSize();
698 }
699
700 // Dalvik used the bionic pthread default stack size for native threads,
701 // so include that here to support apps that expect large native stacks.
702 stack_size += 1 * MB;
703
704 // Under sanitization, frames of the interpreter may become bigger, both for C code as
705 // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization
706 // of all core classes cannot be done in all test circumstances.
707 if (kMemoryToolIsAvailable) {
708 stack_size = std::max(2 * MB, stack_size);
709 }
710
711 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
712 if (stack_size < PTHREAD_STACK_MIN) {
713 stack_size = PTHREAD_STACK_MIN;
714 }
715
716 if (Runtime::Current()->GetImplicitStackOverflowChecks()) {
717 // If we are going to use implicit stack checks, allocate space for the protected
718 // region at the bottom of the stack.
719 stack_size += Thread::kStackOverflowImplicitCheckSize +
720 GetStackOverflowReservedBytes(kRuntimeISA);
721 } else {
722 // It's likely that callers are trying to ensure they have at least a certain amount of
723 // stack space, so we should add our reserved space on top of what they requested, rather
724 // than implicitly take it away from them.
725 stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
726 }
727
728 // Some systems require the stack size to be a multiple of the system page size, so round up.
729 stack_size = RoundUp(stack_size, gPageSize);
730
731 return stack_size;
732 }
733
734 // Return the nearest page-aligned address below the current stack top.
735 NO_INLINE
736 static uint8_t* FindStackTop() {
737 return reinterpret_cast<uint8_t*>(
738 AlignDown(__builtin_frame_address(0), gPageSize));
739 }
740
741 // Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
742 // overflow is detected. It is located right below the stack_begin_.
743 ATTRIBUTE_NO_SANITIZE_ADDRESS
744 void Thread::InstallImplicitProtection() {
745 uint8_t* pregion = tlsPtr_.stack_begin - GetStackOverflowProtectedSize();
746 // Page containing current top of stack.
747 uint8_t* stack_top = FindStackTop();
748
749 // Try to directly protect the stack.
750 VLOG(threads) << "installing stack protected region at " << std::hex <<
751 static_cast<void*>(pregion) << " to " <<
752 static_cast<void*>(pregion + GetStackOverflowProtectedSize() - 1);
753 if (ProtectStack(/* fatal_on_error= */ false)) {
754 // Tell the kernel that we won't be needing these pages any more.
755 // NB. madvise will probably write zeroes into the memory (on linux it does).
756 size_t unwanted_size =
757 reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
758 madvise(pregion, unwanted_size, MADV_DONTNEED);
759 return;
760 }
761
762 // There is a little complexity here that deserves a special mention. On some
763 // architectures, the stack is created using a VM_GROWSDOWN flag
764 // to prevent memory being allocated when it's not needed. This flag makes the
765 // kernel only allocate memory for the stack by growing down in memory. Because we
766 // want to put an mprotected region far away from that at the stack top, we need
767 // to make sure the pages for the stack are mapped in before we call mprotect.
768 //
769 // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
770 // with a non-mapped stack (usually only the main thread).
771 //
772 // We map in the stack by reading every page from the stack bottom (highest address)
773 // to the stack top. (We then madvise this away.) This must be done by reading from the
774 // current stack pointer downwards.
775 //
776 // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
777 // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
778 // thus have to move the stack pointer. We do this portably by using a recursive function with a
779 // large stack frame size.
780
781 // (Defensively) first remove the protection on the protected region as we'll want to read
782 // and write it. Ignore errors.
783 UnprotectStack();
784
785 VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
786 static_cast<void*>(pregion);
787
788 struct RecurseDownStack {
789 // This function has an intentionally large stack size.
790 #pragma GCC diagnostic push
791 #pragma GCC diagnostic ignored "-Wframe-larger-than="
792 NO_INLINE
793 __attribute__((no_sanitize("memtag"))) static void Touch(uintptr_t target) {
794 volatile size_t zero = 0;
795 // Use a large local volatile array to ensure a large frame size. Do not use anything close
796 // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
797 // there is no pragma support for this.
798 // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
799 constexpr size_t kAsanMultiplier =
800 #ifdef ADDRESS_SANITIZER
801 2u;
802 #else
803 1u;
804 #endif
805 // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
806 // auto-initialize this local variable).
807 volatile char space[gPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
808 [[maybe_unused]] char sink = space[zero];
809 // Remove tag from the pointer. Nop in non-hwasan builds.
810 uintptr_t addr = reinterpret_cast<uintptr_t>(
811 __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
812 if (addr >= target + gPageSize) {
813 Touch(target);
814 }
815 zero *= 2; // Try to avoid tail recursion.
816 }
817 #pragma GCC diagnostic pop
818 };
819 RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));
820
821 VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
822 static_cast<void*>(pregion) << " to " <<
823 static_cast<void*>(pregion + GetStackOverflowProtectedSize() - 1);
824
825 // Protect the bottom of the stack to prevent read/write to it.
826 ProtectStack(/* fatal_on_error= */ true);
827
828 // Tell the kernel that we won't be needing these pages any more.
829 // NB. madvise will probably write zeroes into the memory (on linux it does).
830 size_t unwanted_size =
831 reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
832 madvise(pregion, unwanted_size, MADV_DONTNEED);
833 }
834
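// Store the native Thread* into the peer's java.lang.Thread.nativePeer field,
// recording the write in the active transaction when requested.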
835 template <bool kSupportTransaction>
836 static void SetNativePeer(ObjPtr<mirror::Object> java_peer, Thread* thread)
837 REQUIRES_SHARED(Locks::mutator_lock_) {
838 ArtField* field = WellKnownClasses::java_lang_Thread_nativePeer;
839 if (kSupportTransaction && Runtime::Current()->IsActiveTransaction()) {
840 field->SetLong</*kTransactionActive=*/ true>(java_peer, reinterpret_cast<jlong>(thread));
841 } else {
842 field->SetLong</*kTransactionActive=*/ false>(java_peer, reinterpret_cast<jlong>(thread));
843 }
844 }
845
846 static void SetNativePeer(JNIEnv* env, jobject java_peer, Thread* thread) {
847 ScopedObjectAccess soa(env);
848 SetNativePeer</*kSupportTransaction=*/ false>(soa.Decode<mirror::Object>(java_peer), thread);
849 }
850
851 void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
852 CHECK(java_peer != nullptr);
853 Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();
854
855 if (VLOG_IS_ON(threads)) {
856 ScopedObjectAccess soa(env);
857
858 ArtField* f = WellKnownClasses::java_lang_Thread_name;
859 ObjPtr<mirror::String> java_name =
860 f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
861 std::string thread_name;
862 if (java_name != nullptr) {
863 thread_name = java_name->ToModifiedUtf8();
864 } else {
865 thread_name = "(Unnamed)";
866 }
867
868 VLOG(threads) << "Creating native thread for " << thread_name;
869 self->Dump(LOG_STREAM(INFO));
870 }
871
872 Runtime* runtime = Runtime::Current();
873
874 // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
875 bool thread_start_during_shutdown = false;
876 {
877 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
878 if (runtime->IsShuttingDownLocked()) {
879 thread_start_during_shutdown = true;
880 } else {
881 runtime->StartThreadBirth();
882 }
883 }
884 if (thread_start_during_shutdown) {
885 ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
886 env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
887 return;
888 }
889
890 Thread* child_thread = new Thread(is_daemon);
891 // Use global JNI ref to hold peer live while child thread starts.
892 child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
893 stack_size = FixStackSize(stack_size);
894
895 // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
896 // to assign it.
897 SetNativePeer(env, java_peer, child_thread);
898
899 // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
900 // do not have a good way to report this on the child's side.
901 std::string error_msg;
902 std::unique_ptr<JNIEnvExt> child_jni_env_ext(
903 JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));
904
905 int pthread_create_result = 0;
906 if (child_jni_env_ext.get() != nullptr) {
907 pthread_t new_pthread;
908 pthread_attr_t attr;
909 child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
910 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
911 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
912 "PTHREAD_CREATE_DETACHED");
913 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
914 pthread_create_result = pthread_create(&new_pthread,
915 &attr,
916 gUseUserfaultfd ? Thread::CreateCallbackWithUffdGc
917 : Thread::CreateCallback,
918 child_thread);
919 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
920
921 if (pthread_create_result == 0) {
922 // pthread_create started the new thread. The child is now responsible for managing the
923 // JNIEnvExt we created.
924 // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
925 // between the threads.
926 child_jni_env_ext.release(); // NOLINT pthreads API.
927 return;
928 }
929 }
930
931 // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
932 {
933 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
934 runtime->EndThreadBirth();
935 }
936 // Manually delete the global reference since Thread::Init will not have been run. Make sure
937 // nothing can observe both opeer and jpeer set at the same time.
938 child_thread->DeleteJPeer(env);
939 delete child_thread;
940 child_thread = nullptr;
941 // TODO: remove from thread group?
942 SetNativePeer(env, java_peer, nullptr);
943 {
944 std::string msg(child_jni_env_ext.get() == nullptr ?
945 StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
946 StringPrintf("pthread_create (%s stack) failed: %s",
947 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
948 ScopedObjectAccess soa(env);
949 soa.Self()->ThrowOutOfMemoryError(msg.c_str());
950 }
951 }
952
953 bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
954 // This function does all the initialization that must be run by the native thread it applies to.
955 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
956 // we can handshake with the corresponding native thread when it's ready.) Check this native
957 // thread hasn't been through here already...
958 CHECK(Thread::Current() == nullptr);
959
960 // Set pthread_self_ ahead of the pthread_setspecific call that makes Thread::Current() work;
961 // this avoids pthread_self_ ever being invalid when discovered from Thread::Current().
962 tlsPtr_.pthread_self = pthread_self();
963 CHECK(is_started_);
964
965 ScopedTrace trace("Thread::Init");
966
967 SetUpAlternateSignalStack();
968 if (!InitStackHwm()) {
969 return false;
970 }
971 InitCpu();
972 InitTlsEntryPoints();
973 RemoveSuspendTrigger();
974 InitCardTable();
975 InitTid();
976
977 #ifdef __BIONIC__
978 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
979 #else
980 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
981 Thread::self_tls_ = this;
982 #endif
983 DCHECK_EQ(Thread::Current(), this);
984
985 tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
986
987 if (jni_env_ext != nullptr) {
988 DCHECK_EQ(jni_env_ext->GetVm(), java_vm);
989 DCHECK_EQ(jni_env_ext->GetSelf(), this);
990 tlsPtr_.jni_env = jni_env_ext;
991 } else {
992 std::string error_msg;
993 tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
994 if (tlsPtr_.jni_env == nullptr) {
995 LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
996 return false;
997 }
998 }
999
1000 ScopedTrace trace3("ThreadList::Register");
1001 thread_list->Register(this);
1002 return true;
1003 }
1004
1005 template <typename PeerAction>
1006 Thread* Thread::Attach(const char* thread_name,
1007 bool as_daemon,
1008 PeerAction peer_action,
1009 bool should_run_callbacks) {
1010 Runtime* runtime = Runtime::Current();
1011 ScopedTrace trace("Thread::Attach");
1012 if (runtime == nullptr) {
1013 LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
1014 ((thread_name != nullptr) ? thread_name : "(Unnamed)");
1015 return nullptr;
1016 }
1017 Thread* self;
1018 {
1019 ScopedTrace trace2("Thread birth");
1020 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
1021 if (runtime->IsShuttingDownLocked()) {
1022 LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
1023 ((thread_name != nullptr) ? thread_name : "(Unnamed)");
1024 return nullptr;
1025 } else {
1026 Runtime::Current()->StartThreadBirth();
1027 self = new Thread(as_daemon);
1028 bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
1029 Runtime::Current()->EndThreadBirth();
1030 if (!init_success) {
1031 delete self;
1032 return nullptr;
1033 }
1034 }
1035 }
1036
1037 self->InitStringEntryPoints();
1038
1039 CHECK_NE(self->GetState(), ThreadState::kRunnable);
1040 self->SetState(ThreadState::kNative);
1041
1042 // Run the action that is acting on the peer.
1043 if (!peer_action(self)) {
1044 runtime->GetThreadList()->Unregister(self, should_run_callbacks);
1045 // Unregister deletes self, no need to do this here.
1046 return nullptr;
1047 }
1048
1049 if (VLOG_IS_ON(threads)) {
1050 if (thread_name != nullptr) {
1051 VLOG(threads) << "Attaching thread " << thread_name;
1052 } else {
1053 VLOG(threads) << "Attaching unnamed thread.";
1054 }
1055 ScopedObjectAccess soa(self);
1056 self->Dump(LOG_STREAM(INFO));
1057 }
1058
1059 if (should_run_callbacks) {
1060 ScopedObjectAccess soa(self);
1061 runtime->GetRuntimeCallbacks()->ThreadStart(self);
1062 }
1063
1064 return self;
1065 }
1066
1067 Thread* Thread::Attach(const char* thread_name,
1068 bool as_daemon,
1069 jobject thread_group,
1070 bool create_peer,
1071 bool should_run_callbacks) {
1072 auto create_peer_action = [&](Thread* self) {
1073 // If we're the main thread, ClassLinker won't be created until after we're attached,
1074 // so that thread needs a two-stage attach. Regular threads don't need this hack.
1075 // In the compiler, all threads need this hack, because no-one's going to be getting
1076 // a native peer!
1077 if (create_peer) {
1078 self->CreatePeer(thread_name, as_daemon, thread_group);
1079 if (self->IsExceptionPending()) {
1080 // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
1081 // the failure but do not dump the exception details. If we fail to allocate the peer, we
1082 // usually also fail to allocate an exception object and throw a pre-allocated OOME without
1083 // any useful information. If we do manage to allocate the exception object, the memory
1084 // information in the message could have been collected too late and therefore misleading.
1085 {
1086 ScopedObjectAccess soa(self);
1087 LOG(ERROR) << "Exception creating thread peer: "
1088 << ((thread_name != nullptr) ? thread_name : "<null>");
1089 self->ClearException();
1090 }
1091 return false;
1092 }
1093 } else {
1094 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
1095 if (thread_name != nullptr) {
1096 self->SetCachedThreadName(thread_name);
1097 ::art::SetThreadName(thread_name);
1098 } else if (self->GetJniEnv()->IsCheckJniEnabled()) {
1099 LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
1100 }
1101 }
1102 return true;
1103 };
1104 return Attach(thread_name, as_daemon, create_peer_action, should_run_callbacks);
1105 }
1106
1107 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
1108 auto set_peer_action = [&](Thread* self) {
1109 // Install the given peer.
1110 DCHECK(self == Thread::Current());
1111 ScopedObjectAccess soa(self);
1112 ObjPtr<mirror::Object> peer = soa.Decode<mirror::Object>(thread_peer);
1113 self->tlsPtr_.opeer = peer.Ptr();
1114 SetNativePeer</*kSupportTransaction=*/ false>(peer, self);
1115 return true;
1116 };
1117 return Attach(thread_name, as_daemon, set_peer_action, /* should_run_callbacks= */ true);
1118 }
1119
1120 void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
1121 Runtime* runtime = Runtime::Current();
1122 CHECK(runtime->IsStarted());
1123 Thread* self = this;
1124 DCHECK_EQ(self, Thread::Current());
1125
1126 ScopedObjectAccess soa(self);
1127 StackHandleScope<4u> hs(self);
1128 DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
1129 Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
1130 thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
1131 Handle<mirror::String> thread_name = hs.NewHandle(
1132 name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
1133 // Add missing null check in case of OOM b/18297817
1134 if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
1135 CHECK(self->IsExceptionPending());
1136 return;
1137 }
1138 jint thread_priority = GetNativePriority();
1139
1140 DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
1141 Handle<mirror::Object> peer =
1142 hs.NewHandle(WellKnownClasses::java_lang_Thread->AllocObject(self));
1143 if (UNLIKELY(peer == nullptr)) {
1144 CHECK(IsExceptionPending());
1145 return;
1146 }
1147 tlsPtr_.opeer = peer.Get();
1148 WellKnownClasses::java_lang_Thread_init->InvokeInstance<'V', 'L', 'L', 'I', 'Z'>(
1149 self, peer.Get(), thr_group.Get(), thread_name.Get(), thread_priority, as_daemon);
1150 if (self->IsExceptionPending()) {
1151 return;
1152 }
1153
1154 SetNativePeer</*kSupportTransaction=*/ false>(peer.Get(), self);
1155
1156 MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
1157 if (peer_thread_name == nullptr) {
1158 // The Thread constructor should have set the Thread.name to a
1159 // non-null value. However, because we can run without code
1160 // available (in the compiler, in tests), we manually assign the
1161 // fields the constructor should have set.
1162 if (runtime->IsActiveTransaction()) {
1163 InitPeer<true>(tlsPtr_.opeer,
1164 as_daemon,
1165 thr_group.Get(),
1166 thread_name.Get(),
1167 thread_priority);
1168 } else {
1169 InitPeer<false>(tlsPtr_.opeer,
1170 as_daemon,
1171 thr_group.Get(),
1172 thread_name.Get(),
1173 thread_priority);
1174 }
1175 peer_thread_name.Assign(GetThreadName());
1176 }
1177 // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
1178 if (peer_thread_name != nullptr) {
1179 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
1180 }
1181 }
1182
1183 ObjPtr<mirror::Object> Thread::CreateCompileTimePeer(const char* name,
1184 bool as_daemon,
1185 jobject thread_group) {
1186 Runtime* runtime = Runtime::Current();
1187 CHECK(!runtime->IsStarted());
1188 Thread* self = this;
1189 DCHECK_EQ(self, Thread::Current());
1190
1191 ScopedObjectAccessUnchecked soa(self);
1192 StackHandleScope<3u> hs(self);
1193 DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
1194 Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
1195 thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
1196 Handle<mirror::String> thread_name = hs.NewHandle(
1197 name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
1198 // Add missing null check in case of OOM b/18297817
1199 if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
1200 CHECK(self->IsExceptionPending());
1201 return nullptr;
1202 }
1203 jint thread_priority = kNormThreadPriority; // Always normalize to NORM priority.
1204
1205 DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
1206 Handle<mirror::Object> peer = hs.NewHandle(
1207 WellKnownClasses::java_lang_Thread->AllocObject(self));
1208 if (peer == nullptr) {
1209 CHECK(Thread::Current()->IsExceptionPending());
1210 return nullptr;
1211 }
1212
1213 // We cannot call Thread.init, as it will recursively ask for currentThread.
1214
1215 // The Thread constructor should have set the Thread.name to a
1216 // non-null value. However, because we can run without code
1217 // available (in the compiler, in tests), we manually assign the
1218 // fields the constructor should have set.
1219 if (runtime->IsActiveTransaction()) {
1220 InitPeer<true>(peer.Get(),
1221 as_daemon,
1222 thr_group.Get(),
1223 thread_name.Get(),
1224 thread_priority);
1225 } else {
1226 InitPeer<false>(peer.Get(),
1227 as_daemon,
1228 thr_group.Get(),
1229 thread_name.Get(),
1230 thread_priority);
1231 }
1232
1233 return peer.Get();
1234 }
1235
1236 template<bool kTransactionActive>
1237 void Thread::InitPeer(ObjPtr<mirror::Object> peer,
1238 bool as_daemon,
1239 ObjPtr<mirror::Object> thread_group,
1240 ObjPtr<mirror::String> thread_name,
1241 jint thread_priority) {
1242 WellKnownClasses::java_lang_Thread_daemon->SetBoolean<kTransactionActive>(peer,
1243 static_cast<uint8_t>(as_daemon ? 1u : 0u));
1244 WellKnownClasses::java_lang_Thread_group->SetObject<kTransactionActive>(peer, thread_group);
1245 WellKnownClasses::java_lang_Thread_name->SetObject<kTransactionActive>(peer, thread_name);
1246 WellKnownClasses::java_lang_Thread_priority->SetInt<kTransactionActive>(peer, thread_priority);
1247 }
1248
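// Replace the cached native copy of the thread name. The old buffer is freed only
// after concurrent readers (tracked by tls32_.num_name_readers) have drained, so
// they never observe a dangling pointer.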
1249 void Thread::SetCachedThreadName(const char* name) {
1250 DCHECK(name != kThreadNameDuringStartup);
1251 const char* old_name = tlsPtr_.name.exchange(name == nullptr ? nullptr : strdup(name));
1252 if (old_name != nullptr && old_name != kThreadNameDuringStartup) {
1253 // Deallocate it, carefully. Note that the load has to be ordered wrt the store of the xchg.
1254 for (uint32_t i = 0; UNLIKELY(tls32_.num_name_readers.load(std::memory_order_seq_cst) != 0);
1255 ++i) {
1256 static constexpr uint32_t kNumSpins = 1000;
1257 // Ugly, but keeps us from having to do anything on the reader side.
1258 if (i > kNumSpins) {
1259 usleep(500);
1260 }
1261 }
1262 // We saw the reader count drop to zero since we replaced the name; old one is now safe to
1263 // deallocate.
1264 free(const_cast<char *>(old_name));
1265 }
1266 }
1267
1268 void Thread::SetThreadName(const char* name) {
1269 SetCachedThreadName(name);
1270 ::art::SetThreadName(name);
1271 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
1272 }
1273
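// Query the pthread implementation for the given thread's stack base, size and
// guard size, with workarounds for Mac OS and for glibc main threads started with
// an unlimited stack limit.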
1274 static void GetThreadStack(pthread_t thread,
1275 void** stack_base,
1276 size_t* stack_size,
1277 size_t* guard_size) {
1278 #if defined(__APPLE__)
1279 *stack_size = pthread_get_stacksize_np(thread);
1280 void* stack_addr = pthread_get_stackaddr_np(thread);
1281
1282 // Check whether stack_addr is the base or end of the stack.
1283 // (On Mac OS 10.7, it's the end.)
1284 int stack_variable;
1285 if (stack_addr > &stack_variable) {
1286 *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
1287 } else {
1288 *stack_base = stack_addr;
1289 }
1290
1291 // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
1292 pthread_attr_t attributes;
1293 CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
1294 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
1295 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
1296 #else
1297 pthread_attr_t attributes;
1298 CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
1299 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
1300 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
1301 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
1302
1303 #if defined(__GLIBC__)
1304 // If we're the main thread, check whether we were run with an unlimited stack. In that case,
1305 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
1306 // will be broken because we'll die long before we get close to 2GB.
1307 bool is_main_thread = (::art::GetTid() == static_cast<uint32_t>(getpid()));
1308 if (is_main_thread) {
1309 rlimit stack_limit;
1310 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
1311 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
1312 }
1313 if (stack_limit.rlim_cur == RLIM_INFINITY) {
1314 size_t old_stack_size = *stack_size;
1315
1316 // Use the kernel default limit as our size, and adjust the base to match.
1317 *stack_size = 8 * MB;
1318 *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
1319
1320 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
1321 << " to " << PrettySize(*stack_size)
1322 << " with base " << *stack_base;
1323 }
1324 }
1325 #endif
1326
1327 #endif
1328 }
1329
1330 bool Thread::InitStackHwm() {
1331 ScopedTrace trace("InitStackHwm");
1332 void* read_stack_base;
1333 size_t read_stack_size;
1334 size_t read_guard_size;
1335 GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
1336
1337 tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
1338 tlsPtr_.stack_size = read_stack_size;
1339
1340 // The minimum stack size we can cope with is the protected region size + stack overflow check
1341 // region size + some memory for normal stack usage.
1342 //
1343 // The protected region is located at the beginning (lowest address) of the stack region.
1344 // Therefore, it starts at a page-aligned address. Its size should be a multiple of page sizes.
1345 // Typically, it is one page in size, however this varies in some configurations.
1346 //
1347 // The overflow reserved bytes is size of the stack overflow check region, located right after
1348 // the protected region, so also starts at a page-aligned address. The size is discretionary.
1349 // Typically it is 8K, but this varies in some configurations.
1350 //
1351 // The rest of the stack memory is available for normal stack usage. It is located right after
1352 // the stack overflow check region, so its starting address isn't necessarily page-aligned. The
1353 // size of the region is discretionary, however should be chosen in a way that the overall stack
1354 // size is a multiple of page sizes. Historically, it is chosen to be at least 4 KB.
1355 //
1356 // On systems with 4K page size, typically the minimum stack size will be 4+8+4 = 16K.
1357 // The thread won't be able to do much with this stack: even the GC takes between 8K and 12K.
1358 DCHECK_ALIGNED_PARAM(static_cast<size_t>(GetStackOverflowProtectedSize()),
1359 static_cast<int32_t>(gPageSize));
1360 size_t min_stack = GetStackOverflowProtectedSize() +
1361 RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, gPageSize);
1362 if (read_stack_size <= min_stack) {
1363 // Note, as we know the stack is small, avoid operations that could use a lot of stack.
1364 LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
1365 __LINE__,
1366 ::android::base::ERROR,
1367 "Attempt to attach a thread with a too-small stack");
1368 return false;
1369 }
1370
1371 // This is included in the SIGQUIT output, but it's useful here for thread debugging.
1372 VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
1373 read_stack_base,
1374 PrettySize(read_stack_size).c_str(),
1375 PrettySize(read_guard_size).c_str());
1376
1377 // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.
1378
1379 Runtime* runtime = Runtime::Current();
1380 bool implicit_stack_check =
1381 runtime->GetImplicitStackOverflowChecks() && !runtime->IsAotCompiler();
1382
1383 ResetDefaultStackEnd();
1384
1385 // Install the protected region if we are doing implicit overflow checks.
1386 if (implicit_stack_check) {
1387 // The thread might have protected region at the bottom. We need
1388 // to install our own region so we need to move the limits
1389 // of the stack to make room for it.
1390
1391 tlsPtr_.stack_begin += read_guard_size + GetStackOverflowProtectedSize();
1392 tlsPtr_.stack_end += read_guard_size + GetStackOverflowProtectedSize();
1393 tlsPtr_.stack_size -= read_guard_size + GetStackOverflowProtectedSize();
1394
1395 InstallImplicitProtection();
1396 }
1397
1398 // Consistency check.
1399 CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));
1400
1401 return true;
1402 }
1403
1404 void Thread::ShortDump(std::ostream& os) const {
1405 os << "Thread[";
1406 if (GetThreadId() != 0) {
1407 // If we're in kStarting, we won't have a thin lock id or tid yet.
1408 os << GetThreadId()
1409 << ",tid=" << GetTid() << ',';
1410 }
1411 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1412 const char* name = tlsPtr_.name.load();
1413 os << GetState()
1414 << ",Thread*=" << this
1415 << ",peer=" << tlsPtr_.opeer
1416 << ",\"" << (name == nullptr ? "null" : name) << "\""
1417 << "]";
1418 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1419 }
1420
1421 Thread::DumpOrder Thread::Dump(std::ostream& os,
1422 bool dump_native_stack,
1423 bool force_dump_stack) const {
1424 DumpState(os);
1425 return DumpStack(os, dump_native_stack, force_dump_stack);
1426 }
1427
1428 Thread::DumpOrder Thread::Dump(std::ostream& os,
1429 unwindstack::AndroidLocalUnwinder& unwinder,
1430 bool dump_native_stack,
1431 bool force_dump_stack) const {
1432 DumpState(os);
1433 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
1434 }
1435
1436 ObjPtr<mirror::String> Thread::GetThreadName() const {
1437 if (tlsPtr_.opeer == nullptr) {
1438 return nullptr;
1439 }
1440 ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(tlsPtr_.opeer);
1441 return name == nullptr ? nullptr : name->AsString();
1442 }
1443
1444 void Thread::GetThreadName(std::string& name) const {
1445 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1446 // The store part of the increment has to be ordered with respect to the following load.
1447 const char* c_name = tlsPtr_.name.load(std::memory_order_seq_cst);
1448 name.assign(c_name == nullptr ? "<no name>" : c_name);
1449 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1450 }
1451
1452 uint64_t Thread::GetCpuMicroTime() const {
1453 #if defined(__linux__)
1454 clockid_t cpu_clock_id;
1455 pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
1456 timespec now;
1457 clock_gettime(cpu_clock_id, &now);
1458 return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
1459 static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
1460 #else // __APPLE__
1461 UNIMPLEMENTED(WARNING);
1462 return -1;
1463 #endif
1464 }
1465
1466 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
1467 void Thread::UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
1468 LOG(ERROR) << *thread << " suspend count already zero.";
1469 Locks::thread_suspend_count_lock_->Unlock(self);
1470 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1471 Locks::mutator_lock_->SharedTryLock(self);
1472 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1473 LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
1474 }
1475 }
1476 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1477 Locks::thread_list_lock_->TryLock(self);
1478 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1479 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
1480 }
1481 }
1482 std::ostringstream ss;
1483 Runtime::Current()->GetThreadList()->Dump(ss);
1484 LOG(FATAL) << ss.str();
1485 UNREACHABLE();
1486 }
1487
1488 bool Thread::PassActiveSuspendBarriers() {
1489 DCHECK_EQ(this, Thread::Current());
1490 DCHECK_NE(GetState(), ThreadState::kRunnable);
1491 // Grab the suspend_count lock and copy the current set of barriers. Then clear the list and the
1492 // flag. The IncrementSuspendCount function requires the lock so we prevent a race between setting
1493 // the kActiveSuspendBarrier flag and clearing it.
1494 // TODO: Consider doing this without the temporary vector. That code will be a bit
1495 // tricky, since the WrappedSuspend1Barrier may disappear once the barrier is decremented.
1496 std::vector<AtomicInteger*> pass_barriers{};
1497 {
1498 MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1499 if (!ReadFlag(ThreadFlag::kActiveSuspendBarrier)) {
1500 // Quick exit test: The barriers have already been claimed - this is possible as there may
1501 // be a race to claim and it doesn't matter who wins. All of the callers of this function
1502 // (except SuspendAllInternal) will first test the kActiveSuspendBarrier flag without the
1503 // lock. Here we double-check whether the barrier has been passed with the
1504 // suspend_count_lock_.
1505 return false;
1506 }
1507 if (tlsPtr_.active_suspendall_barrier != nullptr) {
1508 // We have at most one active active_suspendall_barrier. See thread.h comment.
1509 pass_barriers.push_back(tlsPtr_.active_suspendall_barrier);
1510 tlsPtr_.active_suspendall_barrier = nullptr;
1511 }
1512 for (WrappedSuspend1Barrier* w = tlsPtr_.active_suspend1_barriers; w != nullptr; w = w->next_) {
1513 CHECK_EQ(w->magic_, WrappedSuspend1Barrier::kMagic)
1514 << "first = " << tlsPtr_.active_suspend1_barriers << " current = " << w
1515 << " next = " << w->next_;
1516 pass_barriers.push_back(&(w->barrier_));
1517 }
1518 tlsPtr_.active_suspend1_barriers = nullptr;
1519 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1520 CHECK_GT(pass_barriers.size(), 0U); // Since kActiveSuspendBarrier was set.
1521 // Decrement suspend barrier(s) while we still hold the lock, since SuspendThread may
1522 // remove and deallocate suspend barriers while holding suspend_count_lock_.
1523 // There will typically only be a single barrier to pass here.
1524 for (AtomicInteger*& barrier : pass_barriers) {
1525 int32_t old_val = barrier->fetch_sub(1, std::memory_order_release);
1526 CHECK_GT(old_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << old_val;
1527 if (old_val != 1) {
1528 // Not the last decrement, so no waiter needs a wake-up; drop it from the wake loop below.
1529 barrier = nullptr;
1530 }
1531 }
1532 }
1533 // Finally do futex_wakes after releasing the lock.
1534 for (AtomicInteger* barrier : pass_barriers) {
1535 #if ART_USE_FUTEXES
1536 if (barrier != nullptr) {
1537 futex(barrier->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
1538 }
1539 #endif
1540 }
1541 return true;
1542 }
1543
1544 void Thread::RunCheckpointFunction() {
1545 DCHECK_EQ(Thread::Current(), this);
1546 CHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1547 // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If
1548 // there are no more checkpoints we will also clear the kCheckpointRequest flag.
1549 Closure* checkpoint;
1550 {
1551 MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1552 checkpoint = tlsPtr_.checkpoint_function;
1553 if (!checkpoint_overflow_.empty()) {
1554 // Overflow list not empty, copy the first one out and continue.
1555 tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
1556 checkpoint_overflow_.pop_front();
1557 } else {
1558 // No overflow checkpoints. Clear the kCheckpointRequest flag
1559 tlsPtr_.checkpoint_function = nullptr;
1560 AtomicClearFlag(ThreadFlag::kCheckpointRequest);
1561 }
1562 }
1563 // Outside the lock, run the checkpoint function.
1564 ScopedTrace trace("Run checkpoint function");
1565 CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint";
1566 checkpoint->Run(this);
1567 }
1568
1569 void Thread::RunEmptyCheckpoint() {
1570 // Note: Empty checkpoint does not access the thread's stack,
1571 // so we do not need to check for the flip function.
1572 DCHECK_EQ(Thread::Current(), this);
1573 AtomicClearFlag(ThreadFlag::kEmptyCheckpointRequest);
1574 Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
1575 }
1576
1577 bool Thread::RequestCheckpoint(Closure* function) {
1578 bool success;
1579 do {
1580 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1581 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1582 return false; // Fail, thread is suspended and so can't run a checkpoint.
1583 }
1584 StateAndFlags new_state_and_flags = old_state_and_flags;
1585 new_state_and_flags.SetFlag(ThreadFlag::kCheckpointRequest);
1586 success = tls32_.state_and_flags.CompareAndSetWeakSequentiallyConsistent(
1587 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1588 } while (!success);
1589 // Succeeded setting checkpoint flag, now insert the actual checkpoint.
1590 if (tlsPtr_.checkpoint_function == nullptr) {
1591 tlsPtr_.checkpoint_function = function;
1592 } else {
1593 checkpoint_overflow_.push_back(function);
1594 }
1595 DCHECK(ReadFlag(ThreadFlag::kCheckpointRequest));
1596 TriggerSuspend();
1597 return true;
1598 }
1599
1600 bool Thread::RequestEmptyCheckpoint() {
1601 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1602 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1603 // If it's not runnable, we don't need to do anything because it won't be in the middle of a
1604 // heap access (eg. the read barrier).
1605 return false;
1606 }
1607
1608 // We must be runnable to request a checkpoint.
1609 DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
1610 StateAndFlags new_state_and_flags = old_state_and_flags;
1611 new_state_and_flags.SetFlag(ThreadFlag::kEmptyCheckpointRequest);
1612 bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent(
1613 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1614 if (success) {
1615 TriggerSuspend();
1616 }
1617 return success;
1618 }
1619
1620 class BarrierClosure : public Closure {
1621 public:
1622 explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
1623
1624 void Run(Thread* self) override {
1625 wrapped_->Run(self);
1626 barrier_.Pass(self);
1627 }
1628
1629 void Wait(Thread* self, ThreadState wait_state) {
1630 if (wait_state != ThreadState::kRunnable) {
1631 barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1);
1632 } else {
1633 barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1);
1634 }
1635 }
1636
1637 private:
1638 Closure* wrapped_;
1639 Barrier barrier_;
1640 };
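// Note (descriptive, added for clarity): BarrierClosure is used by RequestSynchronousCheckpoint()
// below. The wrapped closure runs on the target thread and then Pass()es the barrier, while the
// requesting thread blocks in Wait() until that has happened.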
1641
1642 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
1643 bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState wait_state) {
1644 Thread* self = Thread::Current();
1645 if (this == self) {
1646 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1647 // Unlock the thread_list_lock_ before running so that the state is the same regardless of thread.
1648 Locks::thread_list_lock_->ExclusiveUnlock(self);
1649 // Asked to run on this thread. Just run.
1650 function->Run(this);
1651 return true;
1652 }
1653
1654 // The current thread is not this thread.
1655
1656 VerifyState();
1657
1658 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1659 // If target "this" thread is runnable, try to schedule a checkpoint. Do some gymnastics to not
1660 // hold the suspend-count lock for too long.
1661 if (GetState() == ThreadState::kRunnable) {
1662 BarrierClosure barrier_closure(function);
1663 bool installed = false;
1664 {
1665 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1666 installed = RequestCheckpoint(&barrier_closure);
1667 }
1668 if (installed) {
1669 // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
1670 // reacquire it since we don't know if 'this' hasn't been deleted yet.
1671 Locks::thread_list_lock_->ExclusiveUnlock(self);
1672 ScopedThreadStateChange sts(self, wait_state);
1673 // Wait state can be kRunnable, in which case, for lock ordering purposes, it's as if we ran
1674 // the closure ourselves. This means that the target thread should not acquire a pre-mutator
1675 // lock without running the checkpoint, and the closure should not acquire a pre-mutator
1676 // lock or suspend.
1677 barrier_closure.Wait(self, wait_state);
1678 return true;
1679 }
1680 // No longer runnable. Fall-through.
1681 }
1682
1683 // Target "this" thread was not runnable. Suspend it, hopefully redundantly,
1684 // but it might have become runnable in the meantime.
1685 // Although this is a thread suspension, the target thread only blocks while we run the
1686 // checkpoint, which is presumed to terminate quickly even if other threads are blocked.
1687 // Note: IncrementSuspendCount also expects the thread_list_lock to be held unless this == self.
1688 WrappedSuspend1Barrier wrapped_barrier{};
1689 {
1690 bool is_suspended = false;
1691
1692 {
1693 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1694 // If wait_state is kRunnable, function may not suspend. We thus never block because
1695 // we ourselves are being asked to suspend.
1696 if (UNLIKELY(wait_state != ThreadState::kRunnable && self->GetSuspendCount() != 0)) {
1697 // We are being asked to suspend while we are suspending another thread that may be
1698 // responsible for our suspension. This is likely to result in deadlock if we each
1699 // block on the suspension request. Instead we wait for the situation to change.
1700 ThreadExitFlag target_status;
1701 NotifyOnThreadExit(&target_status);
1702 for (int iter_count = 1; self->GetSuspendCount() != 0; ++iter_count) {
1703 Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1704 Locks::thread_list_lock_->ExclusiveUnlock(self);
1705 {
1706 ScopedThreadStateChange sts(self, wait_state);
1707 usleep(ThreadList::kThreadSuspendSleepUs);
1708 }
1709 CHECK_LT(iter_count, ThreadList::kMaxSuspendRetries);
1710 Locks::thread_list_lock_->ExclusiveLock(self);
1711 if (target_status.HasExited()) {
1712 Locks::thread_list_lock_->ExclusiveUnlock(self);
1713 DCheckUnregisteredEverywhere(&target_status, &target_status);
1714 return false;
1715 }
1716 Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1717 }
1718 UnregisterThreadExitFlag(&target_status);
1719 }
1720 IncrementSuspendCount(self, nullptr, &wrapped_barrier, SuspendReason::kInternal);
1721 VerifyState();
1722 DCHECK_GT(GetSuspendCount(), 0);
1723 if (wait_state != ThreadState::kRunnable) {
1724 DCHECK_EQ(self->GetSuspendCount(), 0);
1725 }
1726 // Since we've incremented the suspend count, "this" thread can no longer disappear.
1727 Locks::thread_list_lock_->ExclusiveUnlock(self);
1728 if (IsSuspended()) {
1729 // See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
1730 RemoveFirstSuspend1Barrier(&wrapped_barrier);
1731 if (!HasActiveSuspendBarrier()) {
1732 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1733 }
1734 is_suspended = true;
1735 }
1736 }
1737 if (!is_suspended) {
1738 // This waits while holding the mutator lock. Effectively `self` becomes
1739 // impossible to suspend until `this` responds to the suspend request.
1740 // Arguably that's not making anything qualitatively worse.
1741 bool success = !Runtime::Current()
1742 ->GetThreadList()
1743 ->WaitForSuspendBarrier(&wrapped_barrier.barrier_)
1744 .has_value();
1745 CHECK(success);
1746 }
1747
1748 // Ensure that the flip function for this thread, if pending, is finished *before*
1749 // the checkpoint function is run. Otherwise, we may end up with both 'to' and 'from'
1750 // space references on the stack, confusing the GC's thread-flip logic. The caller is
1751 // runnable so can't have a pending flip function.
1752 DCHECK_EQ(self->GetState(), ThreadState::kRunnable);
1753 DCHECK(IsSuspended());
1754 DCHECK(!self->GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1755 EnsureFlipFunctionStarted(self, this);
1756 // Since we're runnable, and kPendingFlipFunction is set with all threads suspended, it
1757 // cannot be set again here. Thus kRunningFlipFunction is either already set after the
1758 // EnsureFlipFunctionStarted call, or will not be set before we call Run().
1759 if (ReadFlag(ThreadFlag::kRunningFlipFunction)) {
1760 WaitForFlipFunction(self);
1761 }
1762 function->Run(this);
1763 }
1764
1765 {
1766 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1767 DCHECK_NE(GetState(), ThreadState::kRunnable);
1768 DCHECK_GT(GetSuspendCount(), 0);
1769 DecrementSuspendCount(self);
1770 if (kIsDebugBuild) {
1771 CheckBarrierInactive(&wrapped_barrier);
1772 }
1773 resume_cond_->Broadcast(self);
1774 }
1775
1776 Locks::thread_list_lock_->AssertNotHeld(self);
1777 return true;
1778 }
1779
1780 void Thread::SetFlipFunction(Closure* function) {
1781 // This is called with all threads suspended, except for the calling thread.
1782 DCHECK(IsSuspended() || Thread::Current() == this);
1783 DCHECK(function != nullptr);
1784 DCHECK(GetFlipFunction() == nullptr);
1785 tlsPtr_.flip_function.store(function, std::memory_order_relaxed);
1786 DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1787 AtomicSetFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_release);
1788 }
1789
1790 bool Thread::EnsureFlipFunctionStarted(Thread* self,
1791 Thread* target,
1792 StateAndFlags old_state_and_flags,
1793 ThreadExitFlag* tef,
1794 bool* finished) {
1795 // Note: If tef is non-null, *target may have been destroyed. We have to be careful about
1796 // accessing it. That is the reason this is static and not a member function.
1797 DCHECK(self == Current());
1798 bool check_exited = (tef != nullptr);
1799 // Check that the thread can't unexpectedly exit while we are running.
1800 DCHECK(self == target || check_exited || target->ReadFlag(ThreadFlag::kSuspendRequest) ||
1801 Locks::thread_list_lock_->IsExclusiveHeld(self))
1802 << *target;
1803 bool become_runnable;
1804 auto maybe_release = [=]() NO_THREAD_SAFETY_ANALYSIS /* conditionally unlocks */ {
1805 if (check_exited) {
1806 Locks::thread_list_lock_->Unlock(self);
1807 }
1808 };
1809 auto set_finished = [=](bool value) {
1810 if (finished != nullptr) {
1811 *finished = value;
1812 }
1813 };
1814
1815 if (check_exited) {
1816 Locks::thread_list_lock_->Lock(self);
1817 if (tef->HasExited()) {
1818 Locks::thread_list_lock_->Unlock(self);
1819 set_finished(true);
1820 return false;
1821 }
1822 }
1823 target->VerifyState();
1824 if (old_state_and_flags.GetValue() == 0) {
1825 become_runnable = false;
1826 old_state_and_flags = target->GetStateAndFlags(std::memory_order_relaxed);
1827 } else {
1828 become_runnable = true;
1829 DCHECK(!check_exited);
1830 DCHECK(target == self);
1831 DCHECK(old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
1832 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest));
1833 }
1834 while (true) {
1835 DCHECK(!check_exited || (Locks::thread_list_lock_->IsExclusiveHeld(self) && !tef->HasExited()));
1836 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) {
1837 maybe_release();
1838 set_finished(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction));
1839 return false;
1840 }
1841 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction));
1842 StateAndFlags new_state_and_flags =
1843 old_state_and_flags.WithFlag(ThreadFlag::kRunningFlipFunction)
1844 .WithoutFlag(ThreadFlag::kPendingFlipFunction);
1845 if (become_runnable) {
1846 DCHECK_EQ(self, target);
1847 DCHECK_NE(self->GetState(), ThreadState::kRunnable);
1848 new_state_and_flags = new_state_and_flags.WithState(ThreadState::kRunnable);
1849 }
1850 if (target->tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
1851 new_state_and_flags.GetValue())) {
1852 if (become_runnable) {
1853 self->GetMutatorLock()->TransitionFromSuspendedToRunnable(self);
1854 }
1855 art::Locks::mutator_lock_->AssertSharedHeld(self);
1856 maybe_release();
1857 // Thread will not go away while kRunningFlipFunction is set.
1858 target->RunFlipFunction(self);
1859 // At this point, no flip function flags should be set. It's unsafe to DCHECK that, since
1860 // the thread may now have exited.
1861 set_finished(true);
1862 return become_runnable;
1863 }
1864 if (become_runnable) {
1865 DCHECK(!check_exited); // We didn't acquire thread_list_lock_ .
1866 // Let caller retry.
1867 return false;
1868 }
1869 old_state_and_flags = target->GetStateAndFlags(std::memory_order_acquire);
1870 }
1871 // Unreachable.
1872 }
1873
1874 void Thread::RunFlipFunction(Thread* self) {
1875 // This function is called either by the thread running `ThreadList::FlipThreadRoots()` or when
1876 // a thread becomes runnable, after we've successfully set the kRunningFlipFunction ThreadFlag.
1877 DCHECK(ReadFlag(ThreadFlag::kRunningFlipFunction));
1878
1879 Closure* flip_function = GetFlipFunction();
1880 tlsPtr_.flip_function.store(nullptr, std::memory_order_relaxed);
1881 DCHECK(flip_function != nullptr);
1882 VerifyState();
1883 flip_function->Run(this);
1884 DCHECK(!ReadFlag(ThreadFlag::kPendingFlipFunction));
1885 VerifyState();
1886 AtomicClearFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_release);
1887 // From here on this thread may go away, and it is no longer safe to access.
1888
1889 // Notify all threads that are waiting for completion.
1890 // TODO: Should we create a separate mutex and condition variable instead
1891 // of piggy-backing on the `thread_suspend_count_lock_` and `resume_cond_`?
1892 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1893 resume_cond_->Broadcast(self);
1894 }
1895
1896 void Thread::WaitForFlipFunction(Thread* self) const {
1897 // Another thread is running the flip function. Wait for it to complete.
1898 // Check the flag while holding the mutex so that we do not miss the broadcast.
1899 // Repeat the check after waiting to guard against spurious wakeups (and because
1900 // we share the `thread_suspend_count_lock_` and `resume_cond_` with other code).
1901 // Check that the thread can't unexpectedly exit while we are running.
1902 DCHECK(self == this || ReadFlag(ThreadFlag::kSuspendRequest) ||
1903 Locks::thread_list_lock_->IsExclusiveHeld(self));
1904 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1905 while (true) {
1906 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_acquire);
1907 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) {
1908 return;
1909 }
1910 // We sometimes hold mutator lock here. OK since the flip function must complete quickly.
1911 resume_cond_->WaitHoldingLocks(self);
1912 }
1913 }
1914
1915 void Thread::WaitForFlipFunctionTestingExited(Thread* self, ThreadExitFlag* tef) {
1916 Locks::thread_list_lock_->Lock(self);
1917 if (tef->HasExited()) {
1918 Locks::thread_list_lock_->Unlock(self);
1919 return;
1920 }
1921 // We need to hold suspend_count_lock_ to avoid missed wakeups when the flip function finishes.
1922 // We need to hold thread_list_lock_ because the tef test result is only valid while we hold the
1923 // lock, and once kRunningFlipFunction is no longer set, "this" may be deallocated. Hence the
1924 // complicated locking dance.
1925 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1926 while (true) {
1927 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_acquire);
1928 Locks::thread_list_lock_->Unlock(self); // So we can wait or return.
1929 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) {
1930 return;
1931 }
1932 resume_cond_->WaitHoldingLocks(self);
1933 Locks::thread_suspend_count_lock_->Unlock(self); // To re-lock thread_list_lock.
1934 Locks::thread_list_lock_->Lock(self);
1935 Locks::thread_suspend_count_lock_->Lock(self);
1936 if (tef->HasExited()) {
1937 Locks::thread_list_lock_->Unlock(self);
1938 return;
1939 }
1940 }
1941 }
1942
1943 void Thread::FullSuspendCheck(bool implicit) {
1944 ScopedTrace trace(__FUNCTION__);
1945 DCHECK(!ReadFlag(ThreadFlag::kSuspensionImmune));
1946 DCHECK(this == Thread::Current());
1947 VLOG(threads) << this << " self-suspending";
1948 // Make thread appear suspended to other threads, release mutator_lock_.
1949 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
1950 ScopedThreadSuspension(this, ThreadState::kSuspended); // NOLINT
1951 if (implicit) {
1952 // For implicit suspend check we want to `madvise()` away
1953 // the alternate signal stack to avoid wasting memory.
1954 MadviseAwayAlternateSignalStack();
1955 }
1956 VLOG(threads) << this << " self-reviving";
1957 }
1958
1959 static std::string GetSchedulerGroupName(pid_t tid) {
1960 // /proc/<pid>/cgroup looks like this:
1961 // 2:devices:/
1962 // 1:cpuacct,cpu:/
1963 // We want the third field from the line whose second field contains the "cpu" token.
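// For example, a (hypothetical) line "3:cpu,cpuacct:/bg_non_interactive" would make this function
// return "bg_non_interactive".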
1964 std::string cgroup_file;
1965 if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
1966 &cgroup_file)) {
1967 return "";
1968 }
1969 std::vector<std::string> cgroup_lines;
1970 Split(cgroup_file, '\n', &cgroup_lines);
1971 for (size_t i = 0; i < cgroup_lines.size(); ++i) {
1972 std::vector<std::string> cgroup_fields;
1973 Split(cgroup_lines[i], ':', &cgroup_fields);
1974 std::vector<std::string> cgroups;
1975 Split(cgroup_fields[1], ',', &cgroups);
1976 for (size_t j = 0; j < cgroups.size(); ++j) {
1977 if (cgroups[j] == "cpu") {
1978 return cgroup_fields[2].substr(1); // Skip the leading slash.
1979 }
1980 }
1981 }
1982 return "";
1983 }
1984
1985 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
1986 std::string group_name;
1987 int priority;
1988 bool is_daemon = false;
1989 Thread* self = Thread::Current();
1990
1991 // Don't do this if we are aborting since the GC may have all the threads suspended. This will
1992 // cause ScopedObjectAccessUnchecked to deadlock.
1993 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
1994 ScopedObjectAccessUnchecked soa(self);
1995 priority = WellKnownClasses::java_lang_Thread_priority->GetInt(thread->tlsPtr_.opeer);
1996 is_daemon = WellKnownClasses::java_lang_Thread_daemon->GetBoolean(thread->tlsPtr_.opeer);
1997
1998 ObjPtr<mirror::Object> thread_group =
1999 WellKnownClasses::java_lang_Thread_group->GetObject(thread->tlsPtr_.opeer);
2000
2001 if (thread_group != nullptr) {
2002 ObjPtr<mirror::Object> group_name_object =
2003 WellKnownClasses::java_lang_ThreadGroup_name->GetObject(thread_group);
2004 group_name = (group_name_object != nullptr)
2005 ? group_name_object->AsString()->ToModifiedUtf8()
2006 : "<null>";
2007 }
2008 } else if (thread != nullptr) {
2009 priority = thread->GetNativePriority();
2010 } else {
2011 palette_status_t status = PaletteSchedGetPriority(tid, &priority);
2012 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
2013 }
2014
2015 std::string scheduler_group_name(GetSchedulerGroupName(tid));
2016 if (scheduler_group_name.empty()) {
2017 scheduler_group_name = "default";
2018 }
2019
2020 if (thread != nullptr) {
2021 thread->tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
2022 os << '"' << thread->tlsPtr_.name.load() << '"';
2023 thread->tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
2024 if (is_daemon) {
2025 os << " daemon";
2026 }
2027 os << " prio=" << priority
2028 << " tid=" << thread->GetThreadId()
2029 << " " << thread->GetState();
2030 if (thread->IsStillStarting()) {
2031 os << " (still starting up)";
2032 }
2033 if (thread->tls32_.disable_thread_flip_count != 0) {
2034 os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
2035 }
2036 os << "\n";
2037 } else {
2038 os << '"' << ::art::GetThreadName(tid) << '"'
2039 << " prio=" << priority
2040 << " (not attached)\n";
2041 }
2042
2043 if (thread != nullptr) {
2044 auto suspend_log_fn = [&]() REQUIRES(Locks::thread_suspend_count_lock_) {
2045 StateAndFlags state_and_flags = thread->GetStateAndFlags(std::memory_order_relaxed);
2046 static_assert(
2047 static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
2048 state_and_flags.SetState(ThreadState::kRunnable); // Clear state bits.
2049 os << " | group=\"" << group_name << "\""
2050 << " sCount=" << thread->tls32_.suspend_count
2051 << " ucsCount=" << thread->tls32_.user_code_suspend_count
2052 << " flags=" << state_and_flags.GetValue()
2053 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
2054 << " self=" << reinterpret_cast<const void*>(thread) << "\n";
2055 };
2056 if (Locks::thread_suspend_count_lock_->IsExclusiveHeld(self)) {
2057 Locks::thread_suspend_count_lock_->AssertExclusiveHeld(self); // For annotalysis.
2058 suspend_log_fn();
2059 } else {
2060 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
2061 suspend_log_fn();
2062 }
2063 }
2064
2065 os << " | sysTid=" << tid
2066 << " nice=" << getpriority(PRIO_PROCESS, static_cast<id_t>(tid))
2067 << " cgrp=" << scheduler_group_name;
2068 if (thread != nullptr) {
2069 int policy;
2070 sched_param sp;
2071 #if !defined(__APPLE__)
2072 // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
2073 policy = sched_getscheduler(tid);
2074 if (policy == -1) {
2075 PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
2076 }
2077 int sched_getparam_result = sched_getparam(tid, &sp);
2078 if (sched_getparam_result == -1) {
2079 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
2080 sp.sched_priority = -1;
2081 }
2082 #else
2083 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
2084 __FUNCTION__);
2085 #endif
2086 os << " sched=" << policy << "/" << sp.sched_priority
2087 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
2088 }
2089 os << "\n";
2090
2091 // Grab the scheduler stats for this thread.
2092 std::string scheduler_stats;
2093 if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid),
2094 &scheduler_stats)
2095 && !scheduler_stats.empty()) {
2096 scheduler_stats = android::base::Trim(scheduler_stats); // Lose the trailing '\n'.
2097 } else {
2098 scheduler_stats = "0 0 0";
2099 }
2100
2101 char native_thread_state = '?';
2102 int utime = 0;
2103 int stime = 0;
2104 int task_cpu = 0;
2105 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
2106
2107 os << " | state=" << native_thread_state
2108 << " schedstat=( " << scheduler_stats << " )"
2109 << " utm=" << utime
2110 << " stm=" << stime
2111 << " core=" << task_cpu
2112 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
2113 if (thread != nullptr) {
2114 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
2115 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
2116 << PrettySize(thread->tlsPtr_.stack_size) << "\n";
2117 // Dump the held mutexes.
2118 os << " | held mutexes=";
2119 for (size_t i = 0; i < kLockLevelCount; ++i) {
2120 if (i != kMonitorLock) {
2121 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
2122 if (mutex != nullptr) {
2123 os << " \"" << mutex->GetName() << "\"";
2124 if (mutex->IsReaderWriterMutex()) {
2125 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
2126 if (rw_mutex->GetExclusiveOwnerTid() == tid) {
2127 os << "(exclusive held)";
2128 } else {
2129 os << "(shared held)";
2130 }
2131 }
2132 }
2133 }
2134 }
2135 os << "\n";
2136 }
2137 }
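// Illustrative example of the output produced above for an attached thread (all values made up):
//   "Signal Catcher" daemon prio=10 tid=4 Runnable
//     | group="system" sCount=0 ucsCount=0 flags=0 obj=0x13040218 self=0x7b2c810e00
//     | sysTid=1353 nice=-20 cgrp=default sched=0/0 handle=0x7b2b2894f0
//     | state=R schedstat=( 1813969 1085138 11 ) utm=0 stm=0 core=3 HZ=100
//     | stack=0x7b2b192000-0x7b2b292000 stackSize=1024KB
//     | held mutexes= "mutator lock"(shared held)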
2138
2139 void Thread::DumpState(std::ostream& os) const {
2140 Thread::DumpState(os, this, GetTid());
2141 }
2142
2143 struct StackDumpVisitor : public MonitorObjectsStackVisitor {
2144 StackDumpVisitor(std::ostream& os_in,
2145 Thread* thread_in,
2146 Context* context,
2147 bool can_allocate,
2148 bool check_suspended = true,
2149 bool dump_locks = true)
2150 REQUIRES_SHARED(Locks::mutator_lock_)
2151 : MonitorObjectsStackVisitor(thread_in,
2152 context,
2153 check_suspended,
2154 can_allocate && dump_locks),
2155 os(os_in),
2156 last_method(nullptr),
2157 last_line_number(0),
2158 repetition_count(0) {}
2159
2160 virtual ~StackDumpVisitor() {
2161 if (frame_count == 0) {
2162 os << " (no managed stack frames)\n";
2163 }
2164 }
2165
2166 static constexpr size_t kMaxRepetition = 3u;
2167
2168 VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
2169 REQUIRES_SHARED(Locks::mutator_lock_) {
2170 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
2171 ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache();
2172 int line_number = -1;
2173 uint32_t dex_pc = GetDexPc(false);
2174 if (dex_cache != nullptr) { // be tolerant of bad input
2175 const DexFile* dex_file = dex_cache->GetDexFile();
2176 line_number = annotations::GetLineNumFromPC(dex_file, m, dex_pc);
2177 }
2178 if (line_number == last_line_number && last_method == m) {
2179 ++repetition_count;
2180 } else {
2181 if (repetition_count >= kMaxRepetition) {
2182 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
2183 }
2184 repetition_count = 0;
2185 last_line_number = line_number;
2186 last_method = m;
2187 }
2188
2189 if (repetition_count >= kMaxRepetition) {
2190 // Skip visiting (i.e. printing) anything.
2191 return VisitMethodResult::kSkipMethod;
2192 }
2193
2194 os << " at " << m->PrettyMethod(false);
2195 if (m->IsNative()) {
2196 os << "(Native method)";
2197 } else {
2198 const char* source_file(m->GetDeclaringClassSourceFile());
2199 if (line_number == -1) {
2200 // If we failed to map to a line number, use the dex pc as the line number and leave the
2201 // source file null.
2202 source_file = nullptr;
2203 line_number = static_cast<int32_t>(dex_pc);
2204 }
2205 os << "(" << (source_file != nullptr ? source_file : "unavailable")
2206 << ":" << line_number << ")";
2207 }
2208 os << "\n";
2209 // Go and visit locks.
2210 return VisitMethodResult::kContinueMethod;
2211 }
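// Illustrative examples of lines emitted by StartMethod() above (made-up frames):
//   at java.lang.Object.wait(Object.java:442)
//   at libcore.io.Linux.read(Native method)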
2212
2213 VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
2214 return VisitMethodResult::kContinueMethod;
2215 }
2216
2217 void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
2218 REQUIRES_SHARED(Locks::mutator_lock_) {
2219 PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
2220 }
2221 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
2222 override
2223 REQUIRES_SHARED(Locks::mutator_lock_) {
2224 PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
2225 }
2226 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
2227 ThreadState state,
2228 uint32_t owner_tid)
2229 override
2230 REQUIRES_SHARED(Locks::mutator_lock_) {
2231 const char* msg;
2232 switch (state) {
2233 case ThreadState::kBlocked:
2234 msg = " - waiting to lock ";
2235 break;
2236
2237 case ThreadState::kWaitingForLockInflation:
2238 msg = " - waiting for lock inflation of ";
2239 break;
2240
2241 default:
2242 LOG(FATAL) << "Unreachable";
2243 UNREACHABLE();
2244 }
2245 PrintObject(obj, msg, owner_tid);
2246 num_blocked++;
2247 }
2248 void VisitLockedObject(ObjPtr<mirror::Object> obj)
2249 override
2250 REQUIRES_SHARED(Locks::mutator_lock_) {
2251 PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
2252 num_locked++;
2253 }
2254
2255 void PrintObject(ObjPtr<mirror::Object> obj,
2256 const char* msg,
2257 uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) {
2258 if (obj == nullptr) {
2259 os << msg << "an unknown object";
2260 } else {
2261 const std::string pretty_type(obj->PrettyTypeOf());
2262 // It's often unsafe to allow lock inflation here. We may be the only runnable thread, or
2263 // this may be called from a checkpoint. We get the hashcode on a best effort basis.
2264 static constexpr int kNumRetries = 3;
2265 static constexpr int kSleepMicros = 10;
2266 int32_t hash_code;
2267 for (int i = 0;; ++i) {
2268 hash_code = obj->IdentityHashCodeNoInflation();
2269 if (hash_code != 0 || i == kNumRetries) {
2270 break;
2271 }
2272 usleep(kSleepMicros);
2273 }
2274 if (hash_code == 0) {
2275 os << msg
2276 << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
2277 reinterpret_cast<intptr_t>(obj.Ptr()),
2278 pretty_type.c_str());
2279 } else {
2280 // - waiting on <0x608c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
2281 os << msg << StringPrintf("<0x%08x> (a %s)", hash_code, pretty_type.c_str());
2282 }
2283 }
2284 if (owner_tid != ThreadList::kInvalidThreadId) {
2285 os << " held by thread " << owner_tid;
2286 }
2287 os << "\n";
2288 }
2289
2290 std::ostream& os;
2291 ArtMethod* last_method;
2292 int last_line_number;
2293 size_t repetition_count;
2294 size_t num_blocked = 0;
2295 size_t num_locked = 0;
2296 };
2297
2298 static bool ShouldShowNativeStack(const Thread* thread)
2299 REQUIRES_SHARED(Locks::mutator_lock_) {
2300 ThreadState state = thread->GetState();
2301
2302 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
2303 if (state > ThreadState::kWaiting && state < ThreadState::kStarting) {
2304 return true;
2305 }
2306
2307 // In an Object.wait variant or Thread.sleep? That's not interesting.
2308 if (state == ThreadState::kTimedWaiting ||
2309 state == ThreadState::kSleeping ||
2310 state == ThreadState::kWaiting) {
2311 return false;
2312 }
2313
2314 // Threads with no managed stack frames should be shown.
2315 if (!thread->HasManagedStack()) {
2316 return true;
2317 }
2318
2319 // In some other native method? That's interesting.
2320 // We don't just check kNative because native methods will be in state kSuspended if they're
2321 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
2322 // thread-startup states if it's early enough in the thread's life cycle (http://b/7432159).
2323 ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
2324 return current_method != nullptr && current_method->IsNative();
2325 }
2326
2327 Thread::DumpOrder Thread::DumpJavaStack(std::ostream& os,
2328 bool check_suspended,
2329 bool dump_locks) const {
2330 // Dumping the Java stack involves the verifier for locks. The verifier operates under the
2331 // assumption that there is no exception pending on entry. Thus, stash any pending exception.
2332 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
2333 // thread.
2334 ScopedExceptionStorage ses(Thread::Current());
2335
2336 std::unique_ptr<Context> context(Context::Create());
2337 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
2338 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
2339 dumper.WalkStack();
2340 if (IsJitSensitiveThread()) {
2341 return DumpOrder::kMain;
2342 } else if (dumper.num_blocked > 0) {
2343 return DumpOrder::kBlocked;
2344 } else if (dumper.num_locked > 0) {
2345 return DumpOrder::kLocked;
2346 } else {
2347 return DumpOrder::kDefault;
2348 }
2349 }
2350
2351 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2352 bool dump_native_stack,
2353 bool force_dump_stack) const {
2354 unwindstack::AndroidLocalUnwinder unwinder;
2355 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
2356 }
2357
2358 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2359 unwindstack::AndroidLocalUnwinder& unwinder,
2360 bool dump_native_stack,
2361 bool force_dump_stack) const {
2362 // TODO: we call this code when dying but may not have suspended the thread ourself. The
2363 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit
2364 // the race with the thread_suspend_count_lock_).
2365 bool dump_for_abort = (gAborting > 0);
2366 bool safe_to_dump = (this == Thread::Current() || IsSuspended());
2367 if (!kIsDebugBuild) {
2368 // We always want to dump the stack for an abort. However, there is no point dumping another
2369 // thread's stack in debug builds, where we'll hit the not-suspended check in the stack walk.
2370 safe_to_dump = (safe_to_dump || dump_for_abort);
2371 }
2372 DumpOrder dump_order = DumpOrder::kDefault;
2373 if (safe_to_dump || force_dump_stack) {
2374 uint64_t nanotime = NanoTime();
2375 // If we're currently in native code, dump that stack before dumping the managed stack.
2376 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
2377 ArtMethod* method =
2378 GetCurrentMethod(nullptr,
2379 /*check_suspended=*/ !force_dump_stack,
2380 /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
2381 DumpNativeStack(os, unwinder, GetTid(), " native: ", method);
2382 }
2383 dump_order = DumpJavaStack(os,
2384 /*check_suspended=*/ !force_dump_stack,
2385 /*dump_locks=*/ !force_dump_stack);
2386 Runtime* runtime = Runtime::Current();
2387 std::optional<uint64_t> start = runtime != nullptr ? runtime->SiqQuitNanoTime() : std::nullopt;
2388 if (start.has_value()) {
2389 os << "DumpLatencyMs: " << static_cast<float>(nanotime - start.value()) / 1000000.0 << "\n";
2390 }
2391 } else {
2392 os << "Not able to dump stack of thread that isn't suspended";
2393 }
2394 return dump_order;
2395 }
2396
2397 void Thread::ThreadExitCallback(void* arg) {
2398 Thread* self = reinterpret_cast<Thread*>(arg);
2399 if (self->tls32_.thread_exit_check_count == 0) {
2400 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
2401 "going to use a pthread_key_create destructor?): " << *self;
2402 CHECK(is_started_);
2403 #ifdef __BIONIC__
2404 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
2405 #else
2406 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
2407 Thread::self_tls_ = self;
2408 #endif
2409 self->tls32_.thread_exit_check_count = 1;
2410 } else {
2411 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
2412 }
2413 }
2414
2415 void Thread::Startup() {
2416 CHECK(!is_started_);
2417 is_started_ = true;
2418 {
2419 // MutexLock to keep annotalysis happy.
2420 //
2421 // Note we use null for the thread because Thread::Current can
2422 // return garbage since (is_started_ == true) and
2423 // Thread::pthread_key_self_ is not yet initialized.
2424 // This was seen on glibc.
2425 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
2426 resume_cond_ = new ConditionVariable("Thread resumption condition variable",
2427 *Locks::thread_suspend_count_lock_);
2428 }
2429
2430 // Allocate a TLS slot.
2431 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
2432 "self key");
2433
2434 // Double-check the TLS slot allocation.
2435 if (pthread_getspecific(pthread_key_self_) != nullptr) {
2436 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
2437 }
2438 #ifndef __BIONIC__
2439 CHECK(Thread::self_tls_ == nullptr);
2440 #endif
2441 }
2442
2443 void Thread::FinishStartup() {
2444 Runtime* runtime = Runtime::Current();
2445 CHECK(runtime->IsStarted());
2446
2447 // Finish attaching the main thread.
2448 ScopedObjectAccess soa(Thread::Current());
2449 soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup());
2450 soa.Self()->AssertNoPendingException();
2451
2452 runtime->RunRootClinits(soa.Self());
2453
2454 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
2455 // threads, this is done in Thread.start() on the Java side.
2456 soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
2457 soa.Self()->AssertNoPendingException();
2458 }
2459
2460 void Thread::Shutdown() {
2461 CHECK(is_started_);
2462 is_started_ = false;
2463 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
2464 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
2465 if (resume_cond_ != nullptr) {
2466 delete resume_cond_;
2467 resume_cond_ = nullptr;
2468 }
2469 }
2470
2471 void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) {
2472 ObjPtr<mirror::Object> thread_object = soa.Self()->GetPeer();
2473 ObjPtr<mirror::Object> thread_group_object = soa.Decode<mirror::Object>(thread_group);
2474 if (thread_group == nullptr || kIsDebugBuild) {
2475 // There is always a group set. Retrieve it.
2476 thread_group_object = WellKnownClasses::java_lang_Thread_group->GetObject(thread_object);
2477 if (kIsDebugBuild && thread_group != nullptr) {
2478 CHECK(thread_group_object == soa.Decode<mirror::Object>(thread_group));
2479 }
2480 }
2481 WellKnownClasses::java_lang_ThreadGroup_add->InvokeVirtual<'V', 'L'>(
2482 soa.Self(), thread_group_object, thread_object);
2483 }
2484
2485 void Thread::SignalExitFlags() {
2486 ThreadExitFlag* next;
2487 for (ThreadExitFlag* tef = tlsPtr_.thread_exit_flags; tef != nullptr; tef = next) {
2488 DCHECK(!tef->exited_);
2489 tef->exited_ = true;
2490 next = tef->next_;
2491 if (kIsDebugBuild) {
2492 ThreadExitFlag* const garbage_tef = reinterpret_cast<ThreadExitFlag*>(1);
2493 // Link fields should no longer be used.
2494 tef->prev_ = tef->next_ = garbage_tef;
2495 }
2496 }
2497 tlsPtr_.thread_exit_flags = nullptr; // Now unused.
2498 }
2499
2500 Thread::Thread(bool daemon)
2501 : tls32_(daemon),
2502 wait_monitor_(nullptr),
2503 is_runtime_thread_(false) {
2504 wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
2505 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
2506 tlsPtr_.mutator_lock = Locks::mutator_lock_;
2507 DCHECK(tlsPtr_.mutator_lock != nullptr);
2508 tlsPtr_.name.store(kThreadNameDuringStartup, std::memory_order_relaxed);
2509 CHECK_NE(GetStackOverflowProtectedSize(), 0u);
2510
2511 static_assert((sizeof(Thread) % 4) == 0U,
2512 "art::Thread has a size which is not a multiple of 4.");
2513 DCHECK_EQ(GetStateAndFlags(std::memory_order_relaxed).GetValue(), 0u);
2514 StateAndFlags state_and_flags = StateAndFlags(0u).WithState(ThreadState::kNative);
2515 tls32_.state_and_flags.store(state_and_flags.GetValue(), std::memory_order_relaxed);
2516 tls32_.interrupted.store(false, std::memory_order_relaxed);
2517 // Initialize with no permit; if the java Thread was unparked before being
2518 // started, it will unpark itself before calling into java code.
2519 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
2520 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
2521 std::fill(tlsPtr_.rosalloc_runs,
2522 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
2523 gc::allocator::RosAlloc::GetDedicatedFullRun());
2524 tlsPtr_.checkpoint_function = nullptr;
2525 tlsPtr_.active_suspendall_barrier = nullptr;
2526 tlsPtr_.active_suspend1_barriers = nullptr;
2527 tlsPtr_.flip_function.store(nullptr, std::memory_order_relaxed);
2528 tlsPtr_.thread_local_mark_stack = nullptr;
2529 ResetTlab();
2530 }
2531
2532 bool Thread::CanLoadClasses() const {
2533 return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
2534 }
2535
2536 bool Thread::IsStillStarting() const {
2537 // You might think you can check whether the state is kStarting, but for much of thread startup,
2538 // the thread is in kNative; it might also be in kVmWait.
2539 // You might think you can check whether the peer is null, but the peer is actually created and
2540 // assigned fairly early on, and needs to be.
2541 // It turns out that the last thing to change is the thread name; that's a good proxy for "has
2542 // this thread _ever_ entered kRunnable".
2543 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
2544 (tlsPtr_.name.load() == kThreadNameDuringStartup);
2545 }
2546
2547 void Thread::AssertPendingException() const {
2548 CHECK(IsExceptionPending()) << "Pending exception expected.";
2549 }
2550
2551 void Thread::AssertPendingOOMException() const {
2552 AssertPendingException();
2553 auto* e = GetException();
2554 CHECK_EQ(e->GetClass(), WellKnownClasses::java_lang_OutOfMemoryError.Get()) << e->Dump();
2555 }
2556
2557 void Thread::AssertNoPendingException() const {
2558 if (UNLIKELY(IsExceptionPending())) {
2559 ScopedObjectAccess soa(Thread::Current());
2560 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump();
2561 }
2562 }
2563
2564 void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
2565 if (UNLIKELY(IsExceptionPending())) {
2566 ScopedObjectAccess soa(Thread::Current());
2567 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
2568 << GetException()->Dump();
2569 }
2570 }
2571
2572 class MonitorExitVisitor : public SingleRootVisitor {
2573 public:
2574 explicit MonitorExitVisitor(Thread* self) : self_(self) { }
2575
2576 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
2577 void VisitRoot(mirror::Object* entered_monitor,
2578 [[maybe_unused]] const RootInfo& info) override NO_THREAD_SAFETY_ANALYSIS {
2579 if (self_->HoldsLock(entered_monitor)) {
2580 LOG(WARNING) << "Calling MonitorExit on object "
2581 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
2582 << " left locked by native thread "
2583 << *Thread::Current() << " which is detaching";
2584 entered_monitor->MonitorExit(self_);
2585 }
2586 }
2587
2588 private:
2589 Thread* const self_;
2590 };
2591
2592 void Thread::Destroy(bool should_run_callbacks) {
2593 Thread* self = this;
2594 DCHECK_EQ(self, Thread::Current());
2595
2596 if (tlsPtr_.jni_env != nullptr) {
2597 {
2598 ScopedObjectAccess soa(self);
2599 MonitorExitVisitor visitor(self);
2600 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
2601 tlsPtr_.jni_env->monitors_.VisitRoots(&visitor, RootInfo(kRootVMInternal));
2602 }
2603 // Release locally held global references which releasing may require the mutator lock.
2604 if (tlsPtr_.jpeer != nullptr) {
2605 // If pthread_create fails we don't have a jni env here.
2606 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
2607 tlsPtr_.jpeer = nullptr;
2608 }
2609 if (tlsPtr_.class_loader_override != nullptr) {
2610 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
2611 tlsPtr_.class_loader_override = nullptr;
2612 }
2613 }
2614
2615 if (tlsPtr_.opeer != nullptr) {
2616 ScopedObjectAccess soa(self);
2617 // We may need to call user-supplied managed code, do this before final clean-up.
2618 HandleUncaughtExceptions();
2619 RemoveFromThreadGroup();
2620 Runtime* runtime = Runtime::Current();
2621 if (runtime != nullptr && should_run_callbacks) {
2622 runtime->GetRuntimeCallbacks()->ThreadDeath(self);
2623 }
2624
2625 // this.nativePeer = 0;
2626 SetNativePeer</*kSupportTransaction=*/ true>(tlsPtr_.opeer, nullptr);
2627
2628 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
2629 // who is waiting.
2630 ObjPtr<mirror::Object> lock =
2631 WellKnownClasses::java_lang_Thread_lock->GetObject(tlsPtr_.opeer);
2632 // (This conditional is only needed for tests, where Thread.lock won't have been set.)
2633 if (lock != nullptr) {
2634 StackHandleScope<1> hs(self);
2635 Handle<mirror::Object> h_obj(hs.NewHandle(lock));
2636 ObjectLock<mirror::Object> locker(self, h_obj);
2637 locker.NotifyAll();
2638 }
2639
2640 tlsPtr_.opeer = nullptr;
2641 }
2642
2643 {
2644 ScopedObjectAccess soa(self);
2645 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
2646
2647 if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
2648 Trace::FlushThreadBuffer(self);
2649 }
2650 }
2651 // Mark-stack revocation must be performed at the very end. No
2652 // checkpoint/flip-function or read-barrier should be called after this.
2653 if (gUseReadBarrier) {
2654 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
2655 }
2656 }
2657
2658 Thread::~Thread() {
2659 CHECK(tlsPtr_.class_loader_override == nullptr);
2660 CHECK(tlsPtr_.jpeer == nullptr);
2661 CHECK(tlsPtr_.opeer == nullptr);
2662 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
2663 if (initialized) {
2664 delete tlsPtr_.jni_env;
2665 tlsPtr_.jni_env = nullptr;
2666 }
2667 CHECK_NE(GetState(), ThreadState::kRunnable);
2668 CHECK(!ReadFlag(ThreadFlag::kCheckpointRequest));
2669 CHECK(!ReadFlag(ThreadFlag::kEmptyCheckpointRequest));
2670 CHECK(!ReadFlag(ThreadFlag::kSuspensionImmune));
2671 CHECK(tlsPtr_.checkpoint_function == nullptr);
2672 CHECK_EQ(checkpoint_overflow_.size(), 0u);
2673 // A pending flip function request is OK. FlipThreadRoots will have been notified that we
2674 // exited, and nobody will attempt to process the request.
2675
2676 // Make sure we processed all deoptimization requests.
2677 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
2678 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
2679 "Not all deoptimized frames have been consumed by the debugger.";
2680
2681 // We may be deleting a stillborn thread.
2682 SetStateUnsafe(ThreadState::kTerminated);
2683
2684 delete wait_cond_;
2685 delete wait_mutex_;
2686
2687 if (tlsPtr_.long_jump_context != nullptr) {
2688 delete tlsPtr_.long_jump_context;
2689 }
2690
2691 if (initialized) {
2692 CleanupCpu();
2693 }
2694
2695 SetCachedThreadName(nullptr); // Deallocate name.
2696 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
2697
2698 CHECK_EQ(tlsPtr_.method_trace_buffer, nullptr);
2699
2700 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
2701
2702 TearDownAlternateSignalStack();
2703 }
2704
2705 void Thread::HandleUncaughtExceptions() {
2706 Thread* self = this;
2707 DCHECK_EQ(self, Thread::Current());
2708 if (!self->IsExceptionPending()) {
2709 return;
2710 }
2711
2712 // Get and clear the exception.
2713 ObjPtr<mirror::Object> exception = self->GetException();
2714 self->ClearException();
2715
2716 // Call the Thread instance's dispatchUncaughtException(Throwable)
2717 WellKnownClasses::java_lang_Thread_dispatchUncaughtException->InvokeFinal<'V', 'L'>(
2718 self, tlsPtr_.opeer, exception);
2719
2720 // If the dispatchUncaughtException threw, clear that exception too.
2721 self->ClearException();
2722 }
2723
2724 void Thread::RemoveFromThreadGroup() {
2725 Thread* self = this;
2726 DCHECK_EQ(self, Thread::Current());
2727 // this.group.threadTerminated(this);
2728 // group can be null if we're in the compiler or a test.
2729 ObjPtr<mirror::Object> group =
2730 WellKnownClasses::java_lang_Thread_group->GetObject(tlsPtr_.opeer);
2731 if (group != nullptr) {
2732 WellKnownClasses::java_lang_ThreadGroup_threadTerminated->InvokeVirtual<'V', 'L'>(
2733 self, group, tlsPtr_.opeer);
2734 }
2735 }
2736
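// Helper visitor for Thread::IsJniTransitionReference: walks the quick frames of native
// (non-@CriticalNative) methods and checks whether the given pointer matches either a
// reference argument spilled to the stack (kPointsToStack) or the declaring class reference
// of a static native method (!kPointsToStack).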
2737 template <bool kPointsToStack>
2738 class JniTransitionReferenceVisitor : public StackVisitor {
2739 public:
2740 JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
2741 : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2742 obj_(obj),
2743 found_(false) {}
2744
2745 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2746 ArtMethod* m = GetMethod();
2747 if (!m->IsNative() || m->IsCriticalNative()) {
2748 return true;
2749 }
2750 if (kPointsToStack) {
2751 uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
2752 size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
2753 uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
2754 if (!m->IsStatic()) {
2755 if (current_vreg == obj_) {
2756 found_ = true;
2757 return false;
2758 }
2759 current_vreg += 1u;
2760 }
2761 uint32_t shorty_length;
2762 const char* shorty = m->GetShorty(&shorty_length);
2763 for (size_t i = 1; i != shorty_length; ++i) {
2764 switch (shorty[i]) {
2765 case 'D':
2766 case 'J':
2767 current_vreg += 2u;
2768 break;
2769 case 'L':
2770 if (current_vreg == obj_) {
2771 found_ = true;
2772 return false;
2773 }
2774 FALLTHROUGH_INTENDED;
2775 default:
2776 current_vreg += 1u;
2777 break;
2778 }
2779 }
2780 // Continue only if the object is somewhere higher on the stack.
2781 return obj_ >= current_vreg;
2782 } else { // if (kPointsToStack)
2783 if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
2784 found_ = true;
2785 return false;
2786 }
2787 return true;
2788 }
2789 }
2790
2791 bool Found() const {
2792 return found_;
2793 }
2794
2795 private:
2796 void* obj_;
2797 bool found_;
2798 };
2799
2800 bool Thread::IsJniTransitionReference(jobject obj) const {
2801 DCHECK(obj != nullptr);
2802 // We need a non-const pointer for stack walk even if we're not modifying the thread state.
2803 Thread* thread = const_cast<Thread*>(this);
2804 uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
2805 if (static_cast<size_t>(raw_obj - tlsPtr_.stack_begin) < tlsPtr_.stack_size) {
2806 JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
2807 visitor.WalkStack();
2808 return visitor.Found();
2809 } else {
2810 JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
2811 visitor.WalkStack();
2812 return visitor.Found();
2813 }
2814 }
2815
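// Visit the roots held in all of this thread's handle scopes, reporting them as
// kRootNativeStack roots.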
2816 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
2817 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
2818 visitor, RootInfo(kRootNativeStack, thread_id));
2819 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
2820 cur->VisitRoots(buffered_visitor);
2821 }
2822 }
2823
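// Decode a global or weak global reference to an ObjPtr. Local and JNI transition references
// are not handled here; a cleared weak global decodes to null.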
2824 ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
2825 DCHECK(obj != nullptr);
2826 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2827 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2828 DCHECK_NE(kind, kJniTransition);
2829 DCHECK_NE(kind, kLocal);
2830 ObjPtr<mirror::Object> result;
2831 bool expect_null = false;
2832 if (kind == kGlobal) {
2833 result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
2834 } else {
2835 DCHECK_EQ(kind, kWeakGlobal);
2836 result = tlsPtr_.jni_env->vm_->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
2837 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
2838 // This is a special case where it's okay to return null.
2839 expect_null = true;
2840 result = nullptr;
2841 }
2842 }
2843
2844 DCHECK(expect_null || result != nullptr)
2845 << "use of deleted " << ToStr<IndirectRefKind>(kind).c_str()
2846 << " " << static_cast<const void*>(obj);
2847 return result;
2848 }
2849
2850 bool Thread::IsJWeakCleared(jweak obj) const {
2851 CHECK(obj != nullptr);
2852 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2853 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2854 CHECK_EQ(kind, kWeakGlobal);
2855 return tlsPtr_.jni_env->vm_->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
2856 }
2857
2858 // Implements java.lang.Thread.interrupted.
2859 bool Thread::Interrupted() {
2860 DCHECK_EQ(Thread::Current(), this);
2861 // No other thread can concurrently reset the interrupted flag.
2862 bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst);
2863 if (interrupted) {
2864 tls32_.interrupted.store(false, std::memory_order_seq_cst);
2865 }
2866 return interrupted;
2867 }
2868
2869 // Implements java.lang.Thread.isInterrupted.
2870 bool Thread::IsInterrupted() {
2871 return tls32_.interrupted.load(std::memory_order_seq_cst);
2872 }
2873
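// Set the interrupted flag and wake this thread if it is blocked in wait, sleep or park.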
2874 void Thread::Interrupt(Thread* self) {
2875 {
2876 MutexLock mu(self, *wait_mutex_);
2877 if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
2878 return;
2879 }
2880 tls32_.interrupted.store(true, std::memory_order_seq_cst);
2881 NotifyLocked(self);
2882 }
2883 Unpark();
2884 }
2885
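// Signal this thread's wait condition variable if it is currently waiting on a monitor.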
2886 void Thread::Notify() {
2887 Thread* self = Thread::Current();
2888 MutexLock mu(self, *wait_mutex_);
2889 NotifyLocked(self);
2890 }
2891
2892 void Thread::NotifyLocked(Thread* self) {
2893 if (wait_monitor_ != nullptr) {
2894 wait_cond_->Signal(self);
2895 }
2896 }
2897
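// Replace the class loader override, releasing the previous global reference if one was set.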
2898 void Thread::SetClassLoaderOverride(jobject class_loader_override) {
2899 if (tlsPtr_.class_loader_override != nullptr) {
2900 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
2901 }
2902 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
2903 }
2904
2905 using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;
2906
2907 // Counts the stack trace depth and also fetches the first max_saved_frames frames.
2908 class FetchStackTraceVisitor : public StackVisitor {
2909 public:
2910 explicit FetchStackTraceVisitor(Thread* thread,
2911 ArtMethodDexPcPair* saved_frames = nullptr,
2912 size_t max_saved_frames = 0)
2913 REQUIRES_SHARED(Locks::mutator_lock_)
2914 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2915 saved_frames_(saved_frames),
2916 max_saved_frames_(max_saved_frames) {}
2917
2918 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2919 // We want to skip frames up to and including the exception's constructor.
2920 // Note we also skip the frame if it doesn't have a method (namely the callee
2921 // save frame)
2922 ArtMethod* m = GetMethod();
2923 if (skipping_ && !m->IsRuntimeMethod() &&
2924 !GetClassRoot<mirror::Throwable>()->IsAssignableFrom(m->GetDeclaringClass())) {
2925 skipping_ = false;
2926 }
2927 if (!skipping_) {
2928 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save).
2929 if (depth_ < max_saved_frames_) {
2930 saved_frames_[depth_].first = m;
2931 saved_frames_[depth_].second = m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc();
2932 }
2933 ++depth_;
2934 }
2935 } else {
2936 ++skip_depth_;
2937 }
2938 return true;
2939 }
2940
2941 uint32_t GetDepth() const {
2942 return depth_;
2943 }
2944
2945 uint32_t GetSkipDepth() const {
2946 return skip_depth_;
2947 }
2948
2949 private:
2950 uint32_t depth_ = 0;
2951 uint32_t skip_depth_ = 0;
2952 bool skipping_ = true;
2953 ArtMethodDexPcPair* saved_frames_;
2954 const size_t max_saved_frames_;
2955
2956 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
2957 };
2958
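// Builds the internal stack trace: element 0 of the resulting object array is a PointerArray
// holding the ArtMethod pointers and dex PCs, and the remaining elements keep the corresponding
// declaring classes (or holding class loaders for copied methods) alive.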
2959 class BuildInternalStackTraceVisitor : public StackVisitor {
2960 public:
2961 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, uint32_t skip_depth)
2962 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2963 self_(self),
2964 skip_depth_(skip_depth),
2965 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
2966
2967 bool Init(uint32_t depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
2968 // Allocate method trace as an object array where the first element is a pointer array that
2969 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
2970 // class of the ArtMethod pointers.
2971 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
2972 StackHandleScope<1> hs(self_);
2973 ObjPtr<mirror::Class> array_class =
2974 GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker);
2975 // The first element is the methods and dex pc array, the other elements are declaring classes
2976 // for the methods to ensure classes in the stack trace don't get unloaded.
2977 Handle<mirror::ObjectArray<mirror::Object>> trace(
2978 hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
2979 hs.Self(), array_class, static_cast<int32_t>(depth) + 1)));
2980 if (trace == nullptr) {
2981 // Acquire uninterruptible_ in all paths.
2982 self_->StartAssertNoThreadSuspension("Building internal stack trace");
2983 self_->AssertPendingOOMException();
2984 return false;
2985 }
2986 ObjPtr<mirror::PointerArray> methods_and_pcs =
2987 class_linker->AllocPointerArray(self_, depth * 2);
2988 const char* last_no_suspend_cause =
2989 self_->StartAssertNoThreadSuspension("Building internal stack trace");
2990 if (methods_and_pcs == nullptr) {
2991 self_->AssertPendingOOMException();
2992 return false;
2993 }
2994 trace->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(0, methods_and_pcs);
2995 trace_ = trace.Get();
2996 // If we are called from native, use non-transactional mode.
2997 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
2998 return true;
2999 }
3000
3001 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
3002 self_->EndAssertNoThreadSuspension(nullptr);
3003 }
3004
3005 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
3006 if (trace_ == nullptr) {
3007 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
3008 }
3009 if (skip_depth_ > 0) {
3010 skip_depth_--;
3011 return true;
3012 }
3013 ArtMethod* m = GetMethod();
3014 if (m->IsRuntimeMethod()) {
3015 return true; // Ignore runtime frames (in particular callee save).
3016 }
3017 AddFrame(m, m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc());
3018 return true;
3019 }
3020
3021 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3022 ObjPtr<mirror::PointerArray> methods_and_pcs = GetTraceMethodsAndPCs();
3023 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3024 count_, method, pointer_size_);
3025 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3026 static_cast<uint32_t>(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_);
3027 // Save the declaring class of the method to ensure that the declaring classes of the methods
3028 // do not get unloaded while the stack trace is live. However, this does not work for copied
3029 // methods because the declaring class of a copied method points to an interface class which
3030 // may be in a different class loader. Instead, retrieve the class loader associated with the
3031 // allocator that holds the copied method. This is much cheaper than finding the actual class.
3032 ObjPtr<mirror::Object> keep_alive;
3033 if (UNLIKELY(method->IsCopied())) {
3034 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3035 keep_alive = class_linker->GetHoldingClassLoaderOfCopiedMethod(self_, method);
3036 } else {
3037 keep_alive = method->GetDeclaringClass();
3038 }
3039 trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3040 static_cast<int32_t>(count_) + 1, keep_alive);
3041 ++count_;
3042 }
3043
3044 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
3045 return ObjPtr<mirror::PointerArray>::DownCast(trace_->Get(0));
3046 }
3047
3048 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
3049 return trace_;
3050 }
3051
3052 private:
3053 Thread* const self_;
3054 // How many more frames to skip.
3055 uint32_t skip_depth_;
3056 // Current position down stack trace.
3057 uint32_t count_ = 0;
3058 // An object array where the first element is a pointer array that contains the `ArtMethod`
3059 // pointers on the stack and dex PCs. The rest of the elements are referencing objects
3060 // that shall keep the methods alive, namely the declaring class of the `ArtMethod` for
3061 // declared methods and the class loader for copied methods (because it's faster to find
3062 // the class loader than the actual class that holds the copied method). The `trace_[i+1]`
3063 // contains the declaring class or class loader of the `ArtMethod` of the i'th frame.
3064 // We're initializing a newly allocated trace, so we do not need to record that under
3065 // a transaction. If the transaction is aborted, the whole trace shall be unreachable.
3066 mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
3067 // For cross compilation.
3068 const PointerSize pointer_size_;
3069
3070 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
3071 };
3072
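// Create the internal stack trace object. The stack is walked once to count frames (saving up
// to kMaxSavedFrames of them) and a second time only when the trace is deeper than the
// saved-frames buffer.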
3073 jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
3074 // Compute depth of stack, save frames if possible to avoid needing to recompute many.
3075 constexpr size_t kMaxSavedFrames = 256;
3076 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
3077 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
3078 &saved_frames[0],
3079 kMaxSavedFrames);
3080 count_visitor.WalkStack();
3081 const uint32_t depth = count_visitor.GetDepth();
3082 const uint32_t skip_depth = count_visitor.GetSkipDepth();
3083
3084 // Build internal stack trace.
3085 BuildInternalStackTraceVisitor build_trace_visitor(
3086 soa.Self(), const_cast<Thread*>(this), skip_depth);
3087 if (!build_trace_visitor.Init(depth)) {
3088 return nullptr; // Allocation failed.
3089 }
3090 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
3091 // than doing the stack walk twice.
3092 if (depth < kMaxSavedFrames) {
3093 for (size_t i = 0; i < depth; ++i) {
3094 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
3095 }
3096 } else {
3097 build_trace_visitor.WalkStack();
3098 }
3099
3100 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
3101 if (kIsDebugBuild) {
3102 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
3103 // Second half of trace_methods is dex PCs.
3104 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
3105 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
3106 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
3107 CHECK(method != nullptr);
3108 }
3109 }
3110 return soa.AddLocalReference<jobject>(trace);
3111 }
3112
3113 bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
3114 // Only count the depth since we do not pass a stack frame array as an argument.
3115 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this));
3116 count_visitor.WalkStack();
3117 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth());
3118 }
3119
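// Allocate a java.lang.StackTraceElement for the given method and dex pc, resolving the class
// name, source file and line number (proxy methods get no source file and line number -1).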
3120 static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement(
3121 const ScopedObjectAccessAlreadyRunnable& soa,
3122 ArtMethod* method,
3123 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3124 int32_t line_number;
3125 StackHandleScope<3> hs(soa.Self());
3126 auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
3127 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3128 if (method->IsProxyMethod()) {
3129 line_number = -1;
3130 class_name_object.Assign(method->GetDeclaringClass()->GetName());
3131 // source_name_object intentionally left null for proxy methods
3132 } else {
3133 line_number = method->GetLineNumFromDexPC(dex_pc);
3134 // Allocate element, potentially triggering GC
3135 // TODO: reuse class_name_object via Class::name_?
3136 const char* descriptor = method->GetDeclaringClassDescriptor();
3137 CHECK(descriptor != nullptr);
3138 std::string class_name(PrettyDescriptor(descriptor));
3139 class_name_object.Assign(
3140 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
3141 if (class_name_object == nullptr) {
3142 soa.Self()->AssertPendingOOMException();
3143 return nullptr;
3144 }
3145 const char* source_file = method->GetDeclaringClassSourceFile();
3146 if (line_number == -1) {
3147 // Make the line_number field of StackTraceElement hold the dex pc.
3148 // source_name_object is intentionally left null if we failed to map the dex pc to
3149 // a line number (most probably because there is no debug info). See b/30183883.
3150 line_number = static_cast<int32_t>(dex_pc);
3151 } else {
3152 if (source_file != nullptr) {
3153 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3154 if (source_name_object == nullptr) {
3155 soa.Self()->AssertPendingOOMException();
3156 return nullptr;
3157 }
3158 }
3159 }
3160 }
3161 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
3162 CHECK(method_name != nullptr);
3163 Handle<mirror::String> method_name_object(
3164 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3165 if (method_name_object == nullptr) {
3166 return nullptr;
3167 }
3168 return mirror::StackTraceElement::Alloc(soa.Self(),
3169 class_name_object,
3170 method_name_object,
3171 source_name_object,
3172 line_number);
3173 }
3174
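// Convert the internal stack trace into java.lang.StackTraceElement objects, reusing
// output_array when provided and otherwise allocating a new array.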
3175 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
3176 const ScopedObjectAccessAlreadyRunnable& soa,
3177 jobject internal,
3178 jobjectArray output_array,
3179 int* stack_depth) {
3180 // Decode the internal stack trace into the depth, method trace and PC trace.
3181 // Subtract one for the methods and PC trace.
3182 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3183 DCHECK_GE(depth, 0);
3184
3185 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3186
3187 jobjectArray result;
3188
3189 if (output_array != nullptr) {
3190 // Reuse the array we were given.
3191 result = output_array;
3192 // ...adjusting the number of frames we'll write to not exceed the array length.
3193 const int32_t traces_length =
3194 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength();
3195 depth = std::min(depth, traces_length);
3196 } else {
3197 // Create java_trace array and place in local reference table
3198 ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> java_traces =
3199 class_linker->AllocStackTraceElementArray(soa.Self(), static_cast<size_t>(depth));
3200 if (java_traces == nullptr) {
3201 return nullptr;
3202 }
3203 result = soa.AddLocalReference<jobjectArray>(java_traces);
3204 }
3205
3206 if (stack_depth != nullptr) {
3207 *stack_depth = depth;
3208 }
3209
3210 for (uint32_t i = 0; i < static_cast<uint32_t>(depth); ++i) {
3211 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
3212 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
3213 // Methods and dex PC trace is element 0.
3214 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3215 const ObjPtr<mirror::PointerArray> method_trace =
3216 ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0));
3217 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
3218 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
3219 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3220 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3221 const ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc);
3222 if (obj == nullptr) {
3223 return nullptr;
3224 }
3225 // We are called from native: use non-transactional mode.
3226 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(
3227 static_cast<int32_t>(i), obj);
3228 }
3229 return result;
3230 }
3231
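// Fill in the given java.lang.StackFrameInfo from the method and dex pc. Returns null with a
// pending exception if any of the required allocations or resolutions fail.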
3232 [[nodiscard]] static ObjPtr<mirror::StackFrameInfo> InitStackFrameInfo(
3233 const ScopedObjectAccessAlreadyRunnable& soa,
3234 ClassLinker* class_linker,
3235 Handle<mirror::StackFrameInfo> stackFrameInfo,
3236 ArtMethod* method,
3237 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3238 StackHandleScope<4> hs(soa.Self());
3239 int32_t line_number;
3240 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3241 if (method->IsProxyMethod()) {
3242 line_number = -1;
3243 // source_name_object intentionally left null for proxy methods
3244 } else {
3245 line_number = method->GetLineNumFromDexPC(dex_pc);
3246 if (line_number == -1) {
3247 // Make the line_number field of StackFrameInfo hold the dex pc.
3248 // source_name_object is intentionally left null if we failed to map the dex pc to
3249 // a line number (most probably because there is no debug info). See b/30183883.
3250 line_number = static_cast<int32_t>(dex_pc);
3251 } else {
3252 const char* source_file = method->GetDeclaringClassSourceFile();
3253 if (source_file != nullptr) {
3254 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3255 if (source_name_object == nullptr) {
3256 soa.Self()->AssertPendingOOMException();
3257 return nullptr;
3258 }
3259 }
3260 }
3261 }
3262
3263 Handle<mirror::Class> declaring_class_object(
3264 hs.NewHandle<mirror::Class>(method->GetDeclaringClass()));
3265
3266 ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
3267 const char* method_name = interface_method->GetName();
3268 CHECK(method_name != nullptr);
3269 Handle<mirror::String> method_name_object(
3270 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3271 if (method_name_object == nullptr) {
3272 soa.Self()->AssertPendingOOMException();
3273 return nullptr;
3274 }
3275
3276 dex::ProtoIndex proto_idx =
3277 method->GetDexFile()->GetIndexForProtoId(interface_method->GetPrototype());
3278 Handle<mirror::MethodType> method_type_object(hs.NewHandle<mirror::MethodType>(
3279 class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method)));
3280 if (method_type_object == nullptr) {
3281 soa.Self()->AssertPendingOOMException();
3282 return nullptr;
3283 }
3284
3285 stackFrameInfo->AssignFields(declaring_class_object,
3286 method_type_object,
3287 method_name_object,
3288 source_name_object,
3289 line_number,
3290 static_cast<int32_t>(dex_pc));
3291 return stackFrameInfo.Get();
3292 }
3293
3294 constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY
3295
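// Convert the internal stack trace into StackFrameInfo objects (or just declaring classes when
// FILL_CLASS_REFS_ONLY is set), writing into output_array starting at startBufferIndex and
// returning the index one past the last slot written.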
3296 jint Thread::InternalStackTraceToStackFrameInfoArray(
3297 const ScopedObjectAccessAlreadyRunnable& soa,
3298 jlong mode, // See java.lang.StackStreamFactory for the mode flags
3299 jobject internal,
3300 jint startLevel,
3301 jint batchSize,
3302 jint startBufferIndex,
3303 jobjectArray output_array) {
3304 // Decode the internal stack trace into the depth, method trace and PC trace.
3305 // Subtract one for the methods and PC trace.
3306 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3307 DCHECK_GE(depth, 0);
3308
3309 StackHandleScope<6> hs(soa.Self());
3310 Handle<mirror::ObjectArray<mirror::Object>> framesOrClasses =
3311 hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(output_array));
3312
3313 jint endBufferIndex = startBufferIndex;
3314
3315 if (startLevel < 0 || startLevel >= depth) {
3316 return endBufferIndex;
3317 }
3318
3319 int32_t bufferSize = framesOrClasses->GetLength();
3320 if (startBufferIndex < 0 || startBufferIndex >= bufferSize) {
3321 return endBufferIndex;
3322 }
3323
3324 // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc.
3325 bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0;
3326
3327 Handle<mirror::ObjectArray<mirror::Object>> decoded_traces =
3328 hs.NewHandle(soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>());
3329 // Methods and dex PC trace is element 0.
3330 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3331 Handle<mirror::PointerArray> method_trace =
3332 hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)));
3333
3334 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3335 Handle<mirror::Class> sfi_class =
3336 hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;"));
3337 DCHECK(sfi_class != nullptr);
3338
3339 MutableHandle<mirror::StackFrameInfo> frame = hs.NewHandle<mirror::StackFrameInfo>(nullptr);
3340 MutableHandle<mirror::Class> clazz = hs.NewHandle<mirror::Class>(nullptr);
3341 for (uint32_t i = static_cast<uint32_t>(startLevel); i < static_cast<uint32_t>(depth); ++i) {
3342 if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) {
3343 break;
3344 }
3345
3346 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
3347 if (isClassArray) {
3348 clazz.Assign(method->GetDeclaringClass());
3349 framesOrClasses->Set(endBufferIndex, clazz.Get());
3350 } else {
3351 // Prepare parameters for fields in StackFrameInfo
3352 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3353 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3354
3355 ObjPtr<mirror::Object> frameObject = framesOrClasses->Get(endBufferIndex);
3356 // If libcore didn't allocate the object, we just stop here, but it's unlikely.
3357 if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) {
3358 break;
3359 }
3360 frame.Assign(ObjPtr<mirror::StackFrameInfo>::DownCast(frameObject));
3361 frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc));
3362 // Break if InitStackFrameInfo fails to allocate objects or assign the fields.
3363 if (frame == nullptr) {
3364 break;
3365 }
3366 }
3367
3368 ++endBufferIndex;
3369 }
3370
3371 return endBufferIndex;
3372 }
3373
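// Build an array of dalvik.system.AnnotatedStackTraceElement objects describing each frame,
// the locks held in that frame and, for the top frame, the object the thread is blocked on.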
3374 jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
3375 // This code allocates. Do not allow it to operate with a pending exception.
3376 if (IsExceptionPending()) {
3377 return nullptr;
3378 }
3379
3380 class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor {
3381 public:
3382 CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in,
3383 Thread* self,
3384 Context* context)
3385 : MonitorObjectsStackVisitor(self, context),
3386 wait_jobject_(soaa_in.Env(), nullptr),
3387 block_jobject_(soaa_in.Env(), nullptr),
3388 soaa_(soaa_in) {}
3389
3390 protected:
3391 VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
3392 REQUIRES_SHARED(Locks::mutator_lock_) {
3393 ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
3394 soaa_, m, GetDexPc(/* abort on error */ false));
3395 if (obj == nullptr) {
3396 return VisitMethodResult::kEndStackWalk;
3397 }
3398 stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr()));
3399 return VisitMethodResult::kContinueMethod;
3400 }
3401
3402 VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
3403 lock_objects_.push_back({});
3404 lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
3405
3406 DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size());
3407
3408 return VisitMethodResult::kContinueMethod;
3409 }
3410
3411 void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
3412 REQUIRES_SHARED(Locks::mutator_lock_) {
3413 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3414 }
3415 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
3416 override
3417 REQUIRES_SHARED(Locks::mutator_lock_) {
3418 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3419 }
3420 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
3421 [[maybe_unused]] ThreadState state,
3422 [[maybe_unused]] uint32_t owner_tid) override
3423 REQUIRES_SHARED(Locks::mutator_lock_) {
3424 block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3425 }
3426 void VisitLockedObject(ObjPtr<mirror::Object> obj)
3427 override
3428 REQUIRES_SHARED(Locks::mutator_lock_) {
3429 frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
3430 }
3431
3432 public:
3433 std::vector<ScopedLocalRef<jobject>> stack_trace_elements_;
3434 ScopedLocalRef<jobject> wait_jobject_;
3435 ScopedLocalRef<jobject> block_jobject_;
3436 std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_;
3437
3438 private:
3439 const ScopedObjectAccessAlreadyRunnable& soaa_;
3440
3441 std::vector<ScopedLocalRef<jobject>> frame_lock_objects_;
3442 };
3443
3444 std::unique_ptr<Context> context(Context::Create());
3445 CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get());
3446 dumper.WalkStack();
3447
3448 // There should not be a pending exception. Otherwise, return with it pending.
3449 if (IsExceptionPending()) {
3450 return nullptr;
3451 }
3452
3453 // Now go and create Java arrays.
3454
3455 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3456
3457 StackHandleScope<6> hs(soa.Self());
3458 Handle<mirror::Class> h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass(
3459 soa.Self(),
3460 "[Ldalvik/system/AnnotatedStackTraceElement;"));
3461 if (h_aste_array_class == nullptr) {
3462 return nullptr;
3463 }
3464 Handle<mirror::Class> h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType());
3465
3466 Handle<mirror::Class> h_o_array_class =
3467 hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker));
3468 DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized.
3469
3470
3471 // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924.
3472 class_linker->EnsureInitialized(soa.Self(),
3473 h_aste_class,
3474 /* can_init_fields= */ true,
3475 /* can_init_parents= */ true);
3476 if (soa.Self()->IsExceptionPending()) {
3477 // This should not fail in a healthy runtime.
3478 return nullptr;
3479 }
3480
3481 ArtField* stack_trace_element_field =
3482 h_aste_class->FindDeclaredInstanceField("stackTraceElement", "Ljava/lang/StackTraceElement;");
3483 DCHECK(stack_trace_element_field != nullptr);
3484 ArtField* held_locks_field =
3485 h_aste_class->FindDeclaredInstanceField("heldLocks", "[Ljava/lang/Object;");
3486 DCHECK(held_locks_field != nullptr);
3487 ArtField* blocked_on_field =
3488 h_aste_class->FindDeclaredInstanceField("blockedOn", "Ljava/lang/Object;");
3489 DCHECK(blocked_on_field != nullptr);
3490
3491 int32_t length = static_cast<int32_t>(dumper.stack_trace_elements_.size());
3492 ObjPtr<mirror::ObjectArray<mirror::Object>> array =
3493 mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length);
3494 if (array == nullptr) {
3495 soa.Self()->AssertPendingOOMException();
3496 return nullptr;
3497 }
3498
3499 ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array));
3500
3501 MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
3502 MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
3503 hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
3504 for (size_t i = 0; i != static_cast<size_t>(length); ++i) {
3505 handle.Assign(h_aste_class->AllocObject(soa.Self()));
3506 if (handle == nullptr) {
3507 soa.Self()->AssertPendingOOMException();
3508 return nullptr;
3509 }
3510
3511 // Set stack trace element.
3512 stack_trace_element_field->SetObject<false>(
3513 handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get()));
3514
3515 // Create locked-on array.
3516 if (!dumper.lock_objects_[i].empty()) {
3517 handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(
3518 soa.Self(), h_o_array_class.Get(), static_cast<int32_t>(dumper.lock_objects_[i].size())));
3519 if (handle2 == nullptr) {
3520 soa.Self()->AssertPendingOOMException();
3521 return nullptr;
3522 }
3523 int32_t j = 0;
3524 for (auto& scoped_local : dumper.lock_objects_[i]) {
3525 if (scoped_local == nullptr) {
3526 continue;
3527 }
3528 handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get()));
3529 DCHECK(!soa.Self()->IsExceptionPending());
3530 j++;
3531 }
3532 held_locks_field->SetObject<false>(handle.Get(), handle2.Get());
3533 }
3534
3535 // Set blocked-on object.
3536 if (i == 0) {
3537 if (dumper.block_jobject_ != nullptr) {
3538 blocked_on_field->SetObject<false>(
3539 handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get()));
3540 }
3541 }
3542
3543 ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
3544 soa.Env()->SetObjectArrayElement(result.get(), static_cast<jsize>(i), elem.get());
3545 DCHECK(!soa.Self()->IsExceptionPending());
3546 }
3547
3548 return result.release();
3549 }
3550
3551 void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
3552 va_list args;
3553 va_start(args, fmt);
3554 ThrowNewExceptionV(exception_class_descriptor, fmt, args);
3555 va_end(args);
3556 }
3557
3558 void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
3559 const char* fmt, va_list ap) {
3560 std::string msg;
3561 StringAppendV(&msg, fmt, ap);
3562 ThrowNewException(exception_class_descriptor, msg.c_str());
3563 }
3564
3565 void Thread::ThrowNewException(const char* exception_class_descriptor,
3566 const char* msg) {
3567 // Callers should either clear or call ThrowNewWrappedException.
3568 AssertNoPendingExceptionForNewException(msg);
3569 ThrowNewWrappedException(exception_class_descriptor, msg);
3570 }
3571
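// Return the class loader of the method currently executing on the given thread, or null if
// there is no managed frame.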
3572 static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self)
3573 REQUIRES_SHARED(Locks::mutator_lock_) {
3574 ArtMethod* method = self->GetCurrentMethod(nullptr);
3575 return method != nullptr
3576 ? method->GetDeclaringClass()->GetClassLoader()
3577 : nullptr;
3578 }
3579
3580 void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
3581 const char* msg) {
3582 DCHECK_EQ(this, Thread::Current());
3583 ScopedObjectAccessUnchecked soa(this);
3584 StackHandleScope<3> hs(soa.Self());
3585
3586 // Disable public sdk checks if we need to throw exceptions.
3587 // The checks are only used in AOT compilation and may block (exception) class
3588 // initialization if it needs access to private fields (e.g. serialVersionUID).
3589 //
3590 // Since throwing an exception will EnsureInitialization and the public sdk may
3591 // block that, disable the checks. It's ok to do so, because the thrown exceptions
3592 // are not part of the application code that needs to be verified.
3593 ScopedDisablePublicSdkChecker sdpsc;
3594
3595 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
3596 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
3597 ClearException();
3598 Runtime* runtime = Runtime::Current();
3599 auto* cl = runtime->GetClassLinker();
3600 Handle<mirror::Class> exception_class(
3601 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
3602 if (UNLIKELY(exception_class == nullptr)) {
3603 CHECK(IsExceptionPending());
3604 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
3605 return;
3606 }
3607
3608 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
3609 true))) {
3610 DCHECK(IsExceptionPending());
3611 return;
3612 }
3613 DCHECK_IMPLIES(runtime->IsStarted(), exception_class->IsThrowableClass());
3614 Handle<mirror::Throwable> exception(
3615 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));
3616
3617 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
3618 if (exception == nullptr) {
3619 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3620 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingException());
3621 return;
3622 }
3623
3624 // Choose an appropriate constructor and set up the arguments.
3625 const char* signature;
3626 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
3627 if (msg != nullptr) {
3628 // Ensure we remember this and the method over the String allocation.
3629 msg_string.reset(
3630 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
3631 if (UNLIKELY(msg_string.get() == nullptr)) {
3632 CHECK(IsExceptionPending()); // OOME.
3633 return;
3634 }
3635 if (cause.get() == nullptr) {
3636 signature = "(Ljava/lang/String;)V";
3637 } else {
3638 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
3639 }
3640 } else {
3641 if (cause.get() == nullptr) {
3642 signature = "()V";
3643 } else {
3644 signature = "(Ljava/lang/Throwable;)V";
3645 }
3646 }
3647 ArtMethod* exception_init_method =
3648 exception_class->FindConstructor(signature, cl->GetImagePointerSize());
3649
3650 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
3651 << PrettyDescriptor(exception_class_descriptor);
3652
3653 if (UNLIKELY(!runtime->IsStarted())) {
3654 // Something is trying to throw an exception without a started runtime, which is the common
3655 // case in the compiler. We won't be able to invoke the constructor of the exception, so set
3656 // the exception fields directly.
3657 if (msg != nullptr) {
3658 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString());
3659 }
3660 if (cause.get() != nullptr) {
3661 exception->SetCause(DecodeJObject(cause.get())->AsThrowable());
3662 }
3663 ScopedLocalRef<jobject> trace(GetJniEnv(), CreateInternalStackTrace(soa));
3664 if (trace.get() != nullptr) {
3665 exception->SetStackState(DecodeJObject(trace.get()).Ptr());
3666 }
3667 SetException(exception.Get());
3668 } else {
3669 jvalue jv_args[2];
3670 size_t i = 0;
3671
3672 if (msg != nullptr) {
3673 jv_args[i].l = msg_string.get();
3674 ++i;
3675 }
3676 if (cause.get() != nullptr) {
3677 jv_args[i].l = cause.get();
3678 ++i;
3679 }
3680 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
3681 InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args);
3682 if (LIKELY(!IsExceptionPending())) {
3683 SetException(exception.Get());
3684 }
3685 }
3686 }
3687
3688 void Thread::ThrowOutOfMemoryError(const char* msg) {
3689 LOG(WARNING) << "Throwing OutOfMemoryError "
3690 << '"' << msg << '"'
3691 << " (VmSize " << GetProcessStatus("VmSize")
3692 << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")");
3693 ScopedTrace trace("OutOfMemoryError");
3694 if (!tls32_.throwing_OutOfMemoryError) {
3695 tls32_.throwing_OutOfMemoryError = true;
3696 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
3697 tls32_.throwing_OutOfMemoryError = false;
3698 } else {
3699 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3700 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME());
3701 }
3702 }
3703
3704 Thread* Thread::CurrentFromGdb() {
3705 return Thread::Current();
3706 }
3707
3708 void Thread::DumpFromGdb() const {
3709 std::ostringstream ss;
3710 Dump(ss);
3711 std::string str(ss.str());
3712 // log to stderr for debugging command line processes
3713 std::cerr << str;
3714 #ifdef ART_TARGET_ANDROID
3715 // log to logcat for debugging frameworks processes
3716 LOG(INFO) << str;
3717 #endif
3718 }
3719
3720 // Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
3721 template
3722 void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
3723 template
3724 void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);
3725
3726 template<PointerSize ptr_size>
3727 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
3728 #define DO_THREAD_OFFSET(x, y) \
3729 if (offset == (x).Uint32Value()) { \
3730 os << (y); \
3731 return; \
3732 }
3733 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
3734 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
3735 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
3736 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer");
3737 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
3738 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
3739 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
3740 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
3741 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking")
3742 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
3743 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
3744 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
3745 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
3746 #undef DO_THREAD_OFFSET
3747
3748 #define JNI_ENTRY_POINT_INFO(x) \
3749 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3750 os << #x; \
3751 return; \
3752 }
3753 JNI_ENTRY_POINT_INFO(pDlsymLookup)
3754 JNI_ENTRY_POINT_INFO(pDlsymLookupCritical)
3755 #undef JNI_ENTRY_POINT_INFO
3756
3757 #define QUICK_ENTRY_POINT_INFO(x) \
3758 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3759 os << #x; \
3760 return; \
3761 }
3762 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
3763 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8)
3764 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16)
3765 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32)
3766 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64)
3767 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
3768 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
3769 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
3770 QUICK_ENTRY_POINT_INFO(pAllocStringObject)
3771 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
3772 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
3773 QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
3774 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
3775 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
3776 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
3777 QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess)
3778 QUICK_ENTRY_POINT_INFO(pResolveType)
3779 QUICK_ENTRY_POINT_INFO(pResolveString)
3780 QUICK_ENTRY_POINT_INFO(pSet8Instance)
3781 QUICK_ENTRY_POINT_INFO(pSet8Static)
3782 QUICK_ENTRY_POINT_INFO(pSet16Instance)
3783 QUICK_ENTRY_POINT_INFO(pSet16Static)
3784 QUICK_ENTRY_POINT_INFO(pSet32Instance)
3785 QUICK_ENTRY_POINT_INFO(pSet32Static)
3786 QUICK_ENTRY_POINT_INFO(pSet64Instance)
3787 QUICK_ENTRY_POINT_INFO(pSet64Static)
3788 QUICK_ENTRY_POINT_INFO(pSetObjInstance)
3789 QUICK_ENTRY_POINT_INFO(pSetObjStatic)
3790 QUICK_ENTRY_POINT_INFO(pGetByteInstance)
3791 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
3792 QUICK_ENTRY_POINT_INFO(pGetByteStatic)
3793 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
3794 QUICK_ENTRY_POINT_INFO(pGetShortInstance)
3795 QUICK_ENTRY_POINT_INFO(pGetCharInstance)
3796 QUICK_ENTRY_POINT_INFO(pGetShortStatic)
3797 QUICK_ENTRY_POINT_INFO(pGetCharStatic)
3798 QUICK_ENTRY_POINT_INFO(pGet32Instance)
3799 QUICK_ENTRY_POINT_INFO(pGet32Static)
3800 QUICK_ENTRY_POINT_INFO(pGet64Instance)
3801 QUICK_ENTRY_POINT_INFO(pGet64Static)
3802 QUICK_ENTRY_POINT_INFO(pGetObjInstance)
3803 QUICK_ENTRY_POINT_INFO(pGetObjStatic)
3804 QUICK_ENTRY_POINT_INFO(pAputObject)
3805 QUICK_ENTRY_POINT_INFO(pJniMethodStart)
3806 QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
3807 QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook)
3808 QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult)
3809 QUICK_ENTRY_POINT_INFO(pJniLockObject)
3810 QUICK_ENTRY_POINT_INFO(pJniUnlockObject)
3811 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
3812 QUICK_ENTRY_POINT_INFO(pLockObject)
3813 QUICK_ENTRY_POINT_INFO(pUnlockObject)
3814 QUICK_ENTRY_POINT_INFO(pCmpgDouble)
3815 QUICK_ENTRY_POINT_INFO(pCmpgFloat)
3816 QUICK_ENTRY_POINT_INFO(pCmplDouble)
3817 QUICK_ENTRY_POINT_INFO(pCmplFloat)
3818 QUICK_ENTRY_POINT_INFO(pCos)
3819 QUICK_ENTRY_POINT_INFO(pSin)
3820 QUICK_ENTRY_POINT_INFO(pAcos)
3821 QUICK_ENTRY_POINT_INFO(pAsin)
3822 QUICK_ENTRY_POINT_INFO(pAtan)
3823 QUICK_ENTRY_POINT_INFO(pAtan2)
3824 QUICK_ENTRY_POINT_INFO(pCbrt)
3825 QUICK_ENTRY_POINT_INFO(pCosh)
3826 QUICK_ENTRY_POINT_INFO(pExp)
3827 QUICK_ENTRY_POINT_INFO(pExpm1)
3828 QUICK_ENTRY_POINT_INFO(pHypot)
3829 QUICK_ENTRY_POINT_INFO(pLog)
3830 QUICK_ENTRY_POINT_INFO(pLog10)
3831 QUICK_ENTRY_POINT_INFO(pNextAfter)
3832 QUICK_ENTRY_POINT_INFO(pSinh)
3833 QUICK_ENTRY_POINT_INFO(pTan)
3834 QUICK_ENTRY_POINT_INFO(pTanh)
3835 QUICK_ENTRY_POINT_INFO(pFmod)
3836 QUICK_ENTRY_POINT_INFO(pL2d)
3837 QUICK_ENTRY_POINT_INFO(pFmodf)
3838 QUICK_ENTRY_POINT_INFO(pL2f)
3839 QUICK_ENTRY_POINT_INFO(pD2iz)
3840 QUICK_ENTRY_POINT_INFO(pF2iz)
3841 QUICK_ENTRY_POINT_INFO(pIdivmod)
3842 QUICK_ENTRY_POINT_INFO(pD2l)
3843 QUICK_ENTRY_POINT_INFO(pF2l)
3844 QUICK_ENTRY_POINT_INFO(pLdiv)
3845 QUICK_ENTRY_POINT_INFO(pLmod)
3846 QUICK_ENTRY_POINT_INFO(pLmul)
3847 QUICK_ENTRY_POINT_INFO(pShlLong)
3848 QUICK_ENTRY_POINT_INFO(pShrLong)
3849 QUICK_ENTRY_POINT_INFO(pUshrLong)
3850 QUICK_ENTRY_POINT_INFO(pIndexOf)
3851 QUICK_ENTRY_POINT_INFO(pStringCompareTo)
3852 QUICK_ENTRY_POINT_INFO(pMemcpy)
3853 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
3854 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
3855 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
3856 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
3857 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
3858 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
3859 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
3860 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
3861 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
3862 QUICK_ENTRY_POINT_INFO(pTestSuspend)
3863 QUICK_ENTRY_POINT_INFO(pDeliverException)
3864 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
3865 QUICK_ENTRY_POINT_INFO(pThrowDivZero)
3866 QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
3867 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
3868 QUICK_ENTRY_POINT_INFO(pDeoptimize)
3869 QUICK_ENTRY_POINT_INFO(pA64Load)
3870 QUICK_ENTRY_POINT_INFO(pA64Store)
3871 QUICK_ENTRY_POINT_INFO(pNewEmptyString)
3872 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
3873 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BB)
3874 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
3875 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
3876 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
3877 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
3878 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
3879 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
3880 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
3881 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
3882 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
3883 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
3884 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
3885 QUICK_ENTRY_POINT_INFO(pNewStringFromString)
3886 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
3887 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
3888 QUICK_ENTRY_POINT_INFO(pNewStringFromUtf16Bytes_BII)
3889 QUICK_ENTRY_POINT_INFO(pJniReadBarrier)
3890 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
3891 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
3892 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02)
3893 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03)
3894 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04)
3895 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05)
3896 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06)
3897 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07)
3898 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08)
3899 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09)
3900 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10)
3901 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11)
3902 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12)
3903 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13)
3904 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14)
3905 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15)
3906 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16)
3907 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17)
3908 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18)
3909 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19)
3910 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20)
3911 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21)
3912 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22)
3913 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23)
3914 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24)
3915 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25)
3916 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26)
3917 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
3918 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
3919 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
3920 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
3921 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
3922 #undef QUICK_ENTRY_POINT_INFO
3923
3924 os << offset;
3925 }
3926
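// Deliver the pending exception: report it to instrumentation, deoptimize if required, and
// long-jump to the catch handler found by QuickExceptionHandler.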
3927 void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
3928 // Get exception from thread.
3929 ObjPtr<mirror::Throwable> exception = GetException();
3930 CHECK(exception != nullptr);
3931 if (exception == GetDeoptimizationException()) {
3932 // This wasn't a real exception, so just clear it here. If there was an actual exception it
3933 // will be recorded in the DeoptimizationContext and it will be restored later.
3934 ClearException();
3935 artDeoptimize(this, skip_method_exit_callbacks);
3936 UNREACHABLE();
3937 }
3938
3939 ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
3940
3941 // This is a real exception: let the instrumentation know about it. Exception throw listener
3942 // could set a breakpoint or install listeners that might require a deoptimization. Hence the
3943 // deoptimization check needs to happen after calling the listener.
3944 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3945 if (instrumentation->HasExceptionThrownListeners() &&
3946 IsExceptionThrownByCurrentMethod(exception)) {
3947 // Instrumentation may cause GC so keep the exception object safe.
3948 StackHandleScope<1> hs(this);
3949 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
3950 instrumentation->ExceptionThrownEvent(this, exception);
3951 }
3952 // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
3953 // Note: we do this *after* reporting the exception to instrumentation in case it now requires
3954 // deoptimization. It may happen if a debugger is attached and requests new events (single-step,
3955 // breakpoint, ...) when the exception is reported.
3956 // Frame pop can be requested on a method unwind callback which requires a deopt. We could
3957 // potentially check after each unwind callback to see if a frame pop was requested and deopt if
3958 // needed. Since this is a debug only feature and this path is only taken when an exception is
3959 // thrown, it is not performance critical and we keep it simple by just deopting if method exit
3960 // listeners are installed and frame pop feature is supported.
3961 bool needs_deopt =
3962 instrumentation->HasMethodExitListeners() && Runtime::Current()->AreNonStandardExitsEnabled();
3963 if (Dbg::IsForcedInterpreterNeededForException(this) || IsForceInterpreter() || needs_deopt) {
3964 NthCallerVisitor visitor(this, 0, false);
3965 visitor.WalkStack();
3966 if (visitor.GetCurrentQuickFrame() != nullptr) {
3967 if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) {
3968 // method_type shouldn't matter due to exception handling.
3969 const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
3970 // Save the exception into the deoptimization context so it can be restored
3971 // before entering the interpreter.
3972 PushDeoptimizationContext(
3973 JValue(),
3974 /* is_reference= */ false,
3975 exception,
3976 /* from_code= */ false,
3977 method_type);
3978 artDeoptimize(this, skip_method_exit_callbacks);
3979 UNREACHABLE();
3980 } else {
3981 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
3982 << visitor.caller->PrettyMethod();
3983 }
3984 } else {
3985 // This is either top of call stack, or shadow frame.
3986 DCHECK(visitor.caller == nullptr || visitor.IsShadowFrame());
3987 }
3988 }
3989
3990 // Don't leave exception visible while we try to find the handler, which may cause class
3991 // resolution.
3992 ClearException();
3993 QuickExceptionHandler exception_handler(this, false);
3994 exception_handler.FindCatch(exception, skip_method_exit_callbacks);
3995 if (exception_handler.GetClearException()) {
3996 // Exception was cleared as part of delivery.
3997 DCHECK(!IsExceptionPending());
3998 } else {
3999 // Exception was put back with a throw location.
4000 DCHECK(IsExceptionPending());
4001 // Check the to-space invariant on the re-installed exception (if applicable).
4002 ReadBarrier::MaybeAssertToSpaceInvariant(GetException());
4003 }
4004 exception_handler.DoLongJump();
4005 }
4006
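// Return a Context for a long jump, reusing the thread-local cached context when available.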
4007 Context* Thread::GetLongJumpContext() {
4008 Context* result = tlsPtr_.long_jump_context;
4009 if (result == nullptr) {
4010 result = Context::Create();
4011 } else {
4012 tlsPtr_.long_jump_context = nullptr; // Avoid context being shared.
4013 result->Reset();
4014 }
4015 return result;
4016 }
4017
4018 ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
4019 bool check_suspended,
4020 bool abort_on_error) const {
4021 // Note: this visitor may return with a method set but with dex_pc_ set to dex::kDexNoIndex. This is
4022 // so we don't abort in a special situation (thinlocked monitor) when dumping the Java
4023 // stack.
4024 ArtMethod* method = nullptr;
4025 uint32_t dex_pc = dex::kDexNoIndex;
4026 StackVisitor::WalkStack(
4027 [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
4028 ArtMethod* m = visitor->GetMethod();
4029 if (m->IsRuntimeMethod()) {
4030 // Continue if this is a runtime method.
4031 return true;
4032 }
4033 method = m;
4034 dex_pc = visitor->GetDexPc(abort_on_error);
4035 return false;
4036 },
4037 const_cast<Thread*>(this),
4038 /* context= */ nullptr,
4039 StackVisitor::StackWalkKind::kIncludeInlinedFrames,
4040 check_suspended);
4041
4042 if (dex_pc_out != nullptr) {
4043 *dex_pc_out = dex_pc;
4044 }
4045 return method;
4046 }
4047
4048 bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
4049 return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
4050 }
4051
4052 extern std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
4053 REQUIRES_SHARED(Locks::mutator_lock_);
4054
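// ReferenceMapVisitor walks a thread's stack and reports every object reference it finds
// (in shadow frames, nterp frames, and optimized quick frames) to the given RootVisitor,
// so the GC can mark those references or update them in place when objects move.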
4055 // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
4056 template <typename RootVisitor, bool kPrecise = false>
4057 class ReferenceMapVisitor : public StackVisitor {
4058 public:
4059 ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
4060 REQUIRES_SHARED(Locks::mutator_lock_)
4061 // We are visiting the references in compiled frames, so we do not need
4062 // to know the inlined frames.
4063 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
4064 visitor_(visitor),
4065 visit_declaring_class_(!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {}
4066
4067 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
4068 if (false) {
4069 LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
4070 << StringPrintf("@ PC:%04x", GetDexPc());
4071 }
4072 ShadowFrame* shadow_frame = GetCurrentShadowFrame();
4073 if (shadow_frame != nullptr) {
4074 VisitShadowFrame(shadow_frame);
4075 } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) {
4076 VisitNterpFrame();
4077 } else {
4078 VisitQuickFrame();
4079 }
4080 return true;
4081 }
4082
4083 void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
4084 ArtMethod* m = shadow_frame->GetMethod();
4085 DCHECK(m != nullptr);
4086 VisitDeclaringClass(m);
4087 size_t num_regs = shadow_frame->NumberOfVRegs();
4088 // These are the handle scope references for JNI methods or the vreg references for interpreted code.
4089 for (size_t reg = 0; reg < num_regs; ++reg) {
4090 mirror::Object* ref = shadow_frame->GetVRegReference(reg);
4091 if (ref != nullptr) {
4092 mirror::Object* new_ref = ref;
4093 visitor_(&new_ref, reg, this);
4094 if (new_ref != ref) {
4095 shadow_frame->SetVRegReference(reg, new_ref);
4096 }
4097 }
4098 }
4099 // Visit the monitors recorded in the lock count map, which is required for structured locking checks.
4100 shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
4101 }
4102
4103 private:
4104 // Visiting the declaring class is necessary so that we don't unload the class of a method that
4105 // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
4106 // the threads do not all hold the heap bitmap lock for parallel GC.
4107 void VisitDeclaringClass(ArtMethod* method)
4108 REQUIRES_SHARED(Locks::mutator_lock_)
4109 NO_THREAD_SAFETY_ANALYSIS {
4110 if (!visit_declaring_class_) {
4111 return;
4112 }
4113 ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
4114 // klass can be null for runtime methods.
4115 if (klass != nullptr) {
4116 if (kVerifyImageObjectsMarked) {
4117 gc::Heap* const heap = Runtime::Current()->GetHeap();
4118 gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
4119 /*fail_ok=*/true);
4120 if (space != nullptr && space->IsImageSpace()) {
4121 bool failed = false;
4122 if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
4123 failed = true;
4124 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
4125 } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
4126 failed = true;
4127 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
4128 }
4129 if (failed) {
4130 GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
4131 space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
4132 LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
4133 << " klass@" << klass.Ptr();
4134 // Pretty info last in case it crashes.
4135 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass "
4136 << klass->PrettyClass();
4137 }
4138 }
4139 }
4140 mirror::Object* new_ref = klass.Ptr();
4141 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this);
4142 if (new_ref != klass) {
4143 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
4144 }
4145 }
4146 }
4147
4148 void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
4149 ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
4150 StackReference<mirror::Object>* vreg_ref_base =
4151 reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame));
4152 StackReference<mirror::Object>* vreg_int_base =
4153 reinterpret_cast<StackReference<mirror::Object>*>(NterpGetRegistersArray(cur_quick_frame));
4154 CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData());
4155 const uint16_t num_regs = accessor.RegistersSize();
4156 // An nterp frame has two arrays: a dex register array and a reference array
4157 // that shadows the dex register array but contains only references
4158 // (entries for non-reference dex registers are null). See nterp_helpers.cc.
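// When a reference moves, the loop below updates both the reference array entry
// and the matching dex register array entry, keeping the two arrays in sync.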
4159 for (size_t reg = 0; reg < num_regs; ++reg) {
4160 StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg;
4161 mirror::Object* ref = ref_addr->AsMirrorPtr();
4162 if (ref != nullptr) {
4163 mirror::Object* new_ref = ref;
4164 visitor_(&new_ref, reg, this);
4165 if (new_ref != ref) {
4166 ref_addr->Assign(new_ref);
4167 StackReference<mirror::Object>* int_addr = vreg_int_base + reg;
4168 int_addr->Assign(new_ref);
4169 }
4170 }
4171 }
4172 }
4173
4174 template <typename T>
4175 ALWAYS_INLINE
4176 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) {
4177 ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
4178 DCHECK(cur_quick_frame != nullptr);
4179 ArtMethod* m = *cur_quick_frame;
4180 VisitDeclaringClass(m);
4181
4182 if (m->IsNative()) {
4183 // TODO: Spill the `this` reference in the AOT-compiled String.charAt()
4184 // slow-path for throwing SIOOBE, so that we can remove this carve-out.
4185 if (UNLIKELY(m->IsIntrinsic()) &&
4186 m->GetIntrinsic() == enum_cast<uint32_t>(Intrinsics::kStringCharAt)) {
4187 // The String.charAt() method is AOT-compiled with an intrinsic implementation
4188 // instead of a JNI stub. It has a slow path that constructs a runtime frame
4189 // for throwing SIOOBE and in that path we do not get the `this` pointer
4190 // spilled on the stack, so there is nothing to visit. We can distinguish
4191 // this from the GenericJni path by checking that the PC is in the boot image
4192 // (PC shall be known thanks to the runtime frame for throwing SIOOBE).
4193 // Note that the JIT does not emit that intrinsic implementation.
4194 const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
4195 if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
4196 return;
4197 }
4198 }
4199 // Native methods spill their arguments to the reserved vregs in the caller's frame
4200 // and use pointers to these stack references as jobject, jclass, jarray, etc.
4201 // Note: We can come here for a @CriticalNative method when it needs to resolve the
4202 // target native function but there would be no references to visit below.
4203 const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
4204 const size_t method_pointer_size = static_cast<size_t>(kRuntimePointerSize);
4205 uint32_t* current_vreg = reinterpret_cast<uint32_t*>(
4206 reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size + method_pointer_size);
4207 auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
4208 auto* ref_addr = reinterpret_cast<StackReference<mirror::Object>*>(current_vreg);
4209 mirror::Object* ref = ref_addr->AsMirrorPtr();
4210 if (ref != nullptr) {
4211 mirror::Object* new_ref = ref;
4212 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this);
4213 if (ref != new_ref) {
4214 ref_addr->Assign(new_ref);
4215 }
4216 }
4217 };
4218 const char* shorty = m->GetShorty();
4219 if (!m->IsStatic()) {
4220 visit();
4221 current_vreg += 1u;
4222 }
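// The shorty lists the return type followed by one character per parameter ('L' for a
// reference, 'J'/'D' for 64-bit primitives, etc.). As an illustrative example, a method
// `Object f(int, long, String)` has shorty "LIJL": the loop below skips the return type
// character, advances one vreg slot per 32-bit parameter and two per 'J'/'D', and visits
// the slot first whenever the parameter is a reference.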
4223 for (shorty += 1u; *shorty != 0; ++shorty) {
4224 switch (*shorty) {
4225 case 'D':
4226 case 'J':
4227 current_vreg += 2u;
4228 break;
4229 case 'L':
4230 visit();
4231 FALLTHROUGH_INTENDED;
4232 default:
4233 current_vreg += 1u;
4234 break;
4235 }
4236 }
4237 } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
4238 // Process register map (which native, runtime and proxy methods don't have)
4239 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
4240 DCHECK(method_header->IsOptimized());
4241 StackReference<mirror::Object>* vreg_base =
4242 reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
4243 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
4244 CodeInfo code_info = kPrecise
4245 ? CodeInfo(method_header) // We will need dex register maps.
4246 : CodeInfo::DecodeGcMasksOnly(method_header);
4247 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
4248 DCHECK(map.IsValid());
4249
4250 T vreg_info(m, code_info, map, visitor_);
4251
4252 // Visit stack entries that hold pointers.
4253 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
4254 for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) {
4255 if (stack_mask.LoadBit(i)) {
4256 StackReference<mirror::Object>* ref_addr = vreg_base + i;
4257 mirror::Object* ref = ref_addr->AsMirrorPtr();
4258 if (ref != nullptr) {
4259 mirror::Object* new_ref = ref;
4260 vreg_info.VisitStack(&new_ref, i, this);
4261 if (ref != new_ref) {
4262 ref_addr->Assign(new_ref);
4263 }
4264 }
4265 }
4266 }
4267 // Visit callee-save registers that hold pointers.
4268 uint32_t register_mask = code_info.GetRegisterMaskOf(map);
4269 for (uint32_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
4270 if (register_mask & (1 << i)) {
4271 mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
4272 if (kIsDebugBuild && ref_addr == nullptr) {
4273 std::string thread_name;
4274 GetThread()->GetThreadName(thread_name);
4275 LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
4276 DescribeStack(GetThread());
4277 LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
4278 << "set in register_mask=" << register_mask << " at " << DescribeLocation();
4279 }
4280 if (*ref_addr != nullptr) {
4281 vreg_info.VisitRegister(ref_addr, i, this);
4282 }
4283 }
4284 }
4285 } else if (!m->IsRuntimeMethod() && m->IsProxyMethod()) {
4286 // If this is a proxy method, visit its reference arguments.
4287 DCHECK(!m->IsStatic());
4288 DCHECK(!m->IsNative());
4289 std::vector<StackReference<mirror::Object>*> ref_addrs =
4290 GetProxyReferenceArguments(cur_quick_frame);
4291 for (StackReference<mirror::Object>* ref_addr : ref_addrs) {
4292 mirror::Object* ref = ref_addr->AsMirrorPtr();
4293 if (ref != nullptr) {
4294 mirror::Object* new_ref = ref;
4295 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this);
4296 if (ref != new_ref) {
4297 ref_addr->Assign(new_ref);
4298 }
4299 }
4300 }
4301 }
4302 }
4303
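// Dispatch between the precise and non-precise flavors: with kPrecise we decode the dex
// register maps so each visited reference can be attributed to its dex register; otherwise
// references are reported as imprecise vregs, avoiding the dex register map decoding.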
4304 void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
4305 if (kPrecise) {
4306 VisitQuickFramePrecise();
4307 } else {
4308 VisitQuickFrameNonPrecise();
4309 }
4310 }
4311
4312 void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
4313 struct UndefinedVRegInfo {
4314 UndefinedVRegInfo([[maybe_unused]] ArtMethod* method,
4315 [[maybe_unused]] const CodeInfo& code_info,
4316 [[maybe_unused]] const StackMap& map,
4317 RootVisitor& _visitor)
4318 : visitor(_visitor) {}
4319
4320 ALWAYS_INLINE
4321 void VisitStack(mirror::Object** ref,
4322 [[maybe_unused]] size_t stack_index,
4323 const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
4324 visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
4325 }
4326
4327 ALWAYS_INLINE
4328 void VisitRegister(mirror::Object** ref,
4329 [[maybe_unused]] size_t register_index,
4330 const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
4331 visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
4332 }
4333
4334 RootVisitor& visitor;
4335 };
4336 VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
4337 }
4338
4339 void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
4340 struct StackMapVRegInfo {
4341 StackMapVRegInfo(ArtMethod* method,
4342 const CodeInfo& _code_info,
4343 const StackMap& map,
4344 RootVisitor& _visitor)
4345 : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
4346 code_info(_code_info),
4347 dex_register_map(code_info.GetDexRegisterMapOf(map)),
4348 visitor(_visitor) {
4349 DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
4350 }
4351
4352 // TODO: If necessary, we should consider caching a reverse map instead of the linear
4353 // lookups for each location.
4354 void FindWithType(const size_t index,
4355 const DexRegisterLocation::Kind kind,
4356 mirror::Object** ref,
4357 const StackVisitor* stack_visitor)
4358 REQUIRES_SHARED(Locks::mutator_lock_) {
4359 bool found = false;
4360 for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
4361 DexRegisterLocation location = dex_register_map[dex_reg];
4362 if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
4363 visitor(ref, dex_reg, stack_visitor);
4364 found = true;
4365 }
4366 }
4367
4368 if (!found) {
4369 // If nothing found, report with unknown.
4370 visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor);
4371 }
4372 }
4373
4374 void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
4375 REQUIRES_SHARED(Locks::mutator_lock_) {
4376 const size_t stack_offset = stack_index * kFrameSlotSize;
4377 FindWithType(stack_offset,
4378 DexRegisterLocation::Kind::kInStack,
4379 ref,
4380 stack_visitor);
4381 }
4382
4383 void VisitRegister(mirror::Object** ref,
4384 size_t register_index,
4385 const StackVisitor* stack_visitor)
4386 REQUIRES_SHARED(Locks::mutator_lock_) {
4387 FindWithType(register_index,
4388 DexRegisterLocation::Kind::kInRegister,
4389 ref,
4390 stack_visitor);
4391 }
4392
4393 size_t number_of_dex_registers;
4394 const CodeInfo& code_info;
4395 DexRegisterMap dex_register_map;
4396 RootVisitor& visitor;
4397 };
4398 VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
4399 }
4400
4401 // Visitor for when we visit a root.
4402 RootVisitor& visitor_;
4403 bool visit_declaring_class_;
4404 };
4405
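// Adapts a RootVisitor to the (obj, vreg, stack_visitor) callback shape that
// ReferenceMapVisitor expects, tagging each root with the owning thread's id.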
4406 class RootCallbackVisitor {
4407 public:
4408 RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
4409
4410 void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
4411 REQUIRES_SHARED(Locks::mutator_lock_) {
4412 visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
4413 }
4414
4415 private:
4416 RootVisitor* const visitor_;
4417 const uint32_t tid_;
4418 };
4419
4420 void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
4421 for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
4422 brhs != nullptr;
4423 brhs = brhs->GetLink()) {
4424 brhs->VisitTargets(visitor);
4425 }
4426 }
4427
4428 // FIXME: clang-r433403 reports the below function exceeds frame size limit.
4429 // http://b/197647048
4430 #pragma GCC diagnostic push
4431 #pragma GCC diagnostic ignored "-Wframe-larger-than="
4432 template <bool kPrecise>
4433 void Thread::VisitRoots(RootVisitor* visitor) {
4434 const uint32_t thread_id = GetThreadId();
4435 visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
4436 if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
4437 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
4438 RootInfo(kRootNativeStack, thread_id));
4439 }
4440 if (tlsPtr_.async_exception != nullptr) {
4441 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.async_exception),
4442 RootInfo(kRootNativeStack, thread_id));
4443 }
4444 visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
4445 tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id));
4446 tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
4447 HandleScopeVisitRoots(visitor, thread_id);
4448 // Visit roots for deoptimization.
4449 if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
4450 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4451 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
4452 for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
4453 record != nullptr;
4454 record = record->GetLink()) {
4455 for (ShadowFrame* shadow_frame = record->GetShadowFrame();
4456 shadow_frame != nullptr;
4457 shadow_frame = shadow_frame->GetLink()) {
4458 mapper.VisitShadowFrame(shadow_frame);
4459 }
4460 }
4461 }
4462 for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
4463 record != nullptr;
4464 record = record->GetLink()) {
4465 if (record->IsReference()) {
4466 visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
4467 RootInfo(kRootThreadObject, thread_id));
4468 }
4469 visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
4470 RootInfo(kRootThreadObject, thread_id));
4471 }
4472 if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
4473 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4474 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
4475 for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
4476 record != nullptr;
4477 record = record->GetNext()) {
4478 mapper.VisitShadowFrame(record->GetShadowFrame());
4479 }
4480 }
4481 // Visit roots on this thread's stack
4482 RuntimeContextType context;
4483 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4484 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
4485 mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
4486 }
4487 #pragma GCC diagnostic pop
4488
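// The interpreter cache maps dex instruction pointers to opcode-specific values. For class-
// and string-loading opcodes the cached value is an object pointer acting as a GC root, so
// it must be updated when the object moves and, for classes, replaced with the weak-class
// sentinel when the class is no longer reachable.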
4489 static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, size_t* value)
4490 REQUIRES_SHARED(Locks::mutator_lock_) {
4491 if (inst == nullptr) {
4492 return;
4493 }
4494 using Opcode = Instruction::Code;
4495 Opcode opcode = inst->Opcode();
4496 switch (opcode) {
4497 case Opcode::NEW_INSTANCE:
4498 case Opcode::CHECK_CAST:
4499 case Opcode::INSTANCE_OF:
4500 case Opcode::NEW_ARRAY:
4501 case Opcode::CONST_CLASS: {
4502 mirror::Class* klass = reinterpret_cast<mirror::Class*>(*value);
4503 if (klass == nullptr || klass == Runtime::GetWeakClassSentinel()) {
4504 return;
4505 }
4506 mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
4507 if (new_klass == nullptr) {
4508 *value = reinterpret_cast<size_t>(Runtime::GetWeakClassSentinel());
4509 } else if (new_klass != klass) {
4510 *value = reinterpret_cast<size_t>(new_klass);
4511 }
4512 return;
4513 }
4514 case Opcode::CONST_STRING:
4515 case Opcode::CONST_STRING_JUMBO: {
4516 mirror::Object* object = reinterpret_cast<mirror::Object*>(*value);
4517 if (object == nullptr) {
4518 return;
4519 }
4520 mirror::Object* new_object = visitor->IsMarked(object);
4521 // We know the string is marked because it's a strongly-interned string that
4522 // is always alive (see b/117621117 for trying to make those strings weak).
4523 if (kIsDebugBuild && new_object == nullptr) {
4524 // (b/275005060) Currently the problem is reported only on the CC GC,
4525 // so in that case we log it with more information. Since the failure
4526 // rate is quite high, this effectively samples the failures.
4527 if (gUseReadBarrier) {
4528 Runtime* runtime = Runtime::Current();
4529 gc::collector::ConcurrentCopying* cc = runtime->GetHeap()->ConcurrentCopyingCollector();
4530 CHECK_NE(cc, nullptr);
4531 LOG(FATAL) << cc->DumpReferenceInfo(object, "string")
4532 << " string interned: " << std::boolalpha
4533 << runtime->GetInternTable()->LookupStrong(Thread::Current(),
4534 down_cast<mirror::String*>(object))
4535 << std::noboolalpha;
4536 } else {
4537 // Other GCs
4538 LOG(FATAL) << __FUNCTION__
4539 << ": IsMarked returned null for a strongly interned string: " << object;
4540 }
4541 } else if (new_object != object) {
4542 *value = reinterpret_cast<size_t>(new_object);
4543 }
4544 return;
4545 }
4546 default:
4547 // The following opcode ranges store non-reference values.
4548 if ((Opcode::IGET <= opcode && opcode <= Opcode::SPUT_SHORT) ||
4549 (Opcode::INVOKE_VIRTUAL <= opcode && opcode <= Opcode::INVOKE_INTERFACE_RANGE)) {
4550 return; // Nothing to do for the GC.
4551 }
4552 // A new opcode is using the cache; it needs to be handled explicitly in this method.
4553 DCHECK(false) << "Unhandled opcode " << inst->Opcode();
4554 }
4555 }
4556
4557 void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
4558 for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
4559 SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
4560 }
4561 }
4562
4563 // FIXME: clang-r433403 reports the below function exceeds frame size limit.
4564 // http://b/197647048
4565 #pragma GCC diagnostic push
4566 #pragma GCC diagnostic ignored "-Wframe-larger-than="
4567 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
4568 if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
4569 VisitRoots</* kPrecise= */ true>(visitor);
4570 } else {
4571 VisitRoots</* kPrecise= */ false>(visitor);
4572 }
4573 }
4574 #pragma GCC diagnostic pop
4575
4576 class VerifyRootVisitor : public SingleRootVisitor {
4577 public:
4578 void VisitRoot(mirror::Object* root, [[maybe_unused]] const RootInfo& info) override
4579 REQUIRES_SHARED(Locks::mutator_lock_) {
4580 VerifyObject(root);
4581 }
4582 };
4583
4584 void Thread::VerifyStackImpl() {
4585 if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
4586 VerifyRootVisitor visitor;
4587 std::unique_ptr<Context> context(Context::Create());
4588 RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
4589 ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
4590 mapper.WalkStack();
4591 }
4592 }
4593
4594 // Set the stack end to the value to be used during a stack overflow.
4595 void Thread::SetStackEndForStackOverflow() {
4596 // During stack overflow we allow use of the full stack.
4597 if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
4598 // However, we seem to have already extended to use the full stack.
4599 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
4600 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
4601 DumpStack(LOG_STREAM(ERROR));
4602 LOG(FATAL) << "Recursive stack overflow.";
4603 }
4604
4605 tlsPtr_.stack_end = tlsPtr_.stack_begin;
4606
4607 // Remove the stack overflow protection if it is set up.
4608 bool implicit_stack_check = Runtime::Current()->GetImplicitStackOverflowChecks();
4609 if (implicit_stack_check) {
4610 if (!UnprotectStack()) {
4611 LOG(ERROR) << "Unable to remove stack protection for stack overflow";
4612 }
4613 }
4614 }
4615
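// Thread-local allocation buffer (TLAB) bookkeeping. The bounds are expected to satisfy
// start <= end <= limit (see the DCHECKs below); thread_local_pos is the bump-pointer
// allocation cursor within [start, end).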
4616 void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
4617 DCHECK_LE(start, end);
4618 DCHECK_LE(end, limit);
4619 tlsPtr_.thread_local_start = start;
4620 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
4621 tlsPtr_.thread_local_end = end;
4622 tlsPtr_.thread_local_limit = limit;
4623 tlsPtr_.thread_local_objects = 0;
4624 }
4625
4626 void Thread::ResetTlab() {
4627 gc::Heap* const heap = Runtime::Current()->GetHeap();
4628 if (heap->GetHeapSampler().IsEnabled()) {
4629 // Note: We always ResetTlab before SetTlab, therefore we can do the sample
4630 // offset adjustment here.
4631 heap->AdjustSampleOffset(GetTlabPosOffset());
4632 VLOG(heap) << "JHP: ResetTlab, Tid: " << GetTid()
4633 << " adjustment = "
4634 << (tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start);
4635 }
4636 SetTlab(nullptr, nullptr, nullptr);
4637 }
4638
4639 bool Thread::HasTlab() const {
4640 const bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
4641 if (has_tlab) {
4642 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
4643 } else {
4644 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
4645 }
4646 return has_tlab;
4647 }
4648
4649 void Thread::AdjustTlab(size_t slide_bytes) {
4650 if (HasTlab()) {
4651 tlsPtr_.thread_local_start -= slide_bytes;
4652 tlsPtr_.thread_local_pos -= slide_bytes;
4653 tlsPtr_.thread_local_end -= slide_bytes;
4654 tlsPtr_.thread_local_limit -= slide_bytes;
4655 }
4656 }
4657
4658 std::ostream& operator<<(std::ostream& os, const Thread& thread) {
4659 thread.ShortDump(os);
4660 return os;
4661 }
4662
4663 bool Thread::ProtectStack(bool fatal_on_error) {
4664 void* pregion = tlsPtr_.stack_begin - GetStackOverflowProtectedSize();
4665 VLOG(threads) << "Protecting stack at " << pregion;
4666 if (mprotect(pregion, GetStackOverflowProtectedSize(), PROT_NONE) == -1) {
4667 if (fatal_on_error) {
4668 // b/249586057, LOG(FATAL) times out
4669 LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. "
4670 "Reason: "
4671 << strerror(errno) << " size: " << GetStackOverflowProtectedSize();
4672 exit(1);
4673 }
4674 return false;
4675 }
4676 return true;
4677 }
4678
4679 bool Thread::UnprotectStack() {
4680 void* pregion = tlsPtr_.stack_begin - GetStackOverflowProtectedSize();
4681 VLOG(threads) << "Unprotecting stack at " << pregion;
4682 return mprotect(pregion, GetStackOverflowProtectedSize(), PROT_READ|PROT_WRITE) == 0;
4683 }
4684
4685 size_t Thread::NumberOfHeldMutexes() const {
4686 size_t count = 0;
4687 for (BaseMutex* mu : tlsPtr_.held_mutexes) {
4688 count += mu != nullptr ? 1 : 0;
4689 }
4690 return count;
4691 }
4692
4693 void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
4694 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
4695 ClearException();
4696 ObjPtr<mirror::Throwable> pending_exception;
4697 bool from_code = false;
4698 DeoptimizationMethodType method_type;
4699 PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type);
4700 SetTopOfStack(nullptr);
4701
4702 // Restore the exception that was pending before deoptimization then interpret the
4703 // deoptimized frames.
4704 if (pending_exception != nullptr) {
4705 SetException(pending_exception);
4706 }
4707
4708 ShadowFrame* shadow_frame = MaybePopDeoptimizedStackedShadowFrame();
4709 // We may not have a shadow frame if we deoptimized at the return of the
4710 // quick_to_interpreter_bridge which got directly called by art_quick_invoke_stub.
4711 if (shadow_frame != nullptr) {
4712 SetTopOfShadowStack(shadow_frame);
4713 interpreter::EnterInterpreterFromDeoptimize(this,
4714 shadow_frame,
4715 result,
4716 from_code,
4717 method_type);
4718 }
4719 }
4720
4721 void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
4722 CHECK(new_exception != nullptr);
4723 Runtime::Current()->SetAsyncExceptionsThrown();
4724 if (kIsDebugBuild) {
4725 // Make sure we are in a checkpoint.
4726 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
4727 CHECK(this == Thread::Current() || GetSuspendCount() >= 1)
4728 << "It doesn't look like this was called in a checkpoint! this: "
4729 << this << " count: " << GetSuspendCount();
4730 }
4731 tlsPtr_.async_exception = new_exception.Ptr();
4732 }
4733
4734 bool Thread::ObserveAsyncException() {
4735 DCHECK(this == Thread::Current());
4736 if (tlsPtr_.async_exception != nullptr) {
4737 if (tlsPtr_.exception != nullptr) {
4738 LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: "
4739 << tlsPtr_.exception->Dump();
4740 LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump();
4741 }
4742 tlsPtr_.exception = tlsPtr_.async_exception;
4743 tlsPtr_.async_exception = nullptr;
4744 return true;
4745 } else {
4746 return IsExceptionPending();
4747 }
4748 }
4749
4750 void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
4751 CHECK(new_exception != nullptr);
4752 // TODO: DCHECK(!IsExceptionPending());
4753 tlsPtr_.exception = new_exception.Ptr();
4754 }
4755
4756 bool Thread::IsAotCompiler() {
4757 return Runtime::Current()->IsAotCompiler();
4758 }
4759
4760 mirror::Object* Thread::GetPeerFromOtherThread() {
4761 Thread* self = Thread::Current();
4762 if (this == self) {
4763 // We often call this on every thread, including ourselves.
4764 return GetPeer();
4765 }
4766 // If "this" thread is not suspended, it could disappear.
4767 DCHECK(IsSuspended()) << *this;
4768 DCHECK(tlsPtr_.jpeer == nullptr);
4769 // Some JVMTI code may unfortunately hold thread_list_lock_, but if it does, it should hold the
4770 // mutator lock in exclusive mode, and we should not have a pending flip function.
4771 if (kIsDebugBuild && Locks::thread_list_lock_->IsExclusiveHeld(self)) {
4772 Locks::mutator_lock_->AssertExclusiveHeld(self);
4773 CHECK(!ReadFlag(ThreadFlag::kPendingFlipFunction));
4774 }
4775 // Ensure that opeer is not obsolete.
4776 EnsureFlipFunctionStarted(self, this);
4777 if (ReadFlag(ThreadFlag::kRunningFlipFunction)) {
4778 // Does not release mutator lock. Hence no new flip requests can be issued.
4779 WaitForFlipFunction(self);
4780 }
4781 return tlsPtr_.opeer;
4782 }
4783
4784 mirror::Object* Thread::LockedGetPeerFromOtherThread(ThreadExitFlag* tef) {
4785 DCHECK(tlsPtr_.jpeer == nullptr);
4786 Thread* self = Thread::Current();
4787 Locks::thread_list_lock_->AssertHeld(self);
4788 if (ReadFlag(ThreadFlag::kPendingFlipFunction)) {
4789 // It is unsafe to call EnsureFlipFunctionStarted with thread_list_lock_. Thus we temporarily
4790 // release it, taking care to handle the case in which "this" thread disappears while we no
4791 // longer hold it.
4792 Locks::thread_list_lock_->Unlock(self);
4793 EnsureFlipFunctionStarted(self, this, StateAndFlags(0), tef);
4794 Locks::thread_list_lock_->Lock(self);
4795 if (tef->HasExited()) {
4796 return nullptr;
4797 }
4798 }
4799 if (ReadFlag(ThreadFlag::kRunningFlipFunction)) {
4800 // Does not release mutator lock. Hence no new flip requests can be issued.
4801 WaitForFlipFunction(self);
4802 }
4803 return tlsPtr_.opeer;
4804 }
4805
4806 void Thread::SetReadBarrierEntrypoints() {
4807 // Make sure entrypoints aren't null.
4808 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true);
4809 }
4810
4811 void Thread::ClearAllInterpreterCaches() {
4812 static struct ClearInterpreterCacheClosure : Closure {
4813 void Run(Thread* thread) override {
4814 thread->GetInterpreterCache()->Clear(thread);
4815 }
4816 } closure;
4817 Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
4818 }
4819
4820
4821 void Thread::ReleaseLongJumpContextInternal() {
4822 // Each QuickExceptionHandler gets a long jump context and uses
4823 // it for doing the long jump, after finding catch blocks/doing deoptimization.
4824 // Both finding catch blocks and deoptimization can trigger another
4825 // exception such as a result of class loading. So there can be nested
4826 // cases of exception handling and multiple contexts being used.
4827 // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
4828 // for reuse so there is no need to always allocate a new one each time when
4829 // getting a context. Since we only keep one context for reuse, delete the
4830 // existing one; the passed-in context has yet to be used for a longjump.
4831 delete tlsPtr_.long_jump_context;
4832 }
4833
4834 void Thread::SetNativePriority(int new_priority) {
4835 palette_status_t status = PaletteSchedSetPriority(GetTid(), new_priority);
4836 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
4837 }
4838
4839 int Thread::GetNativePriority() const {
4840 int priority = 0;
4841 palette_status_t status = PaletteSchedGetPriority(GetTid(), &priority);
4842 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
4843 return priority;
4844 }
4845
4846 void Thread::AbortInThis(const std::string& message) {
4847 std::string thread_name;
4848 Thread::Current()->GetThreadName(thread_name);
4849 LOG(ERROR) << message;
4850 LOG(ERROR) << "Aborting culprit thread";
4851 Runtime::Current()->SetAbortMessage(("Caused " + thread_name + " failure : " + message).c_str());
4852 // Unlike Runtime::Abort() we do not fflush(nullptr), since we want to send the signal with as
4853 // little delay as possible.
4854 int res = pthread_kill(tlsPtr_.pthread_self, SIGABRT);
4855 if (res != 0) {
4856 LOG(ERROR) << "pthread_kill failed with " << res << " " << strerror(res) << " target was "
4857 << tls32_.tid;
4858 } else {
4859 // Wait for our process to be aborted.
4860 sleep(10 /* seconds */);
4861 }
4862 // The process should have died long before we got here. Never return.
4863 LOG(FATAL) << "Failed to abort in culprit thread: " << message;
4864 UNREACHABLE();
4865 }
4866
4867 bool Thread::IsSystemDaemon() const {
4868 if (GetPeer() == nullptr) {
4869 return false;
4870 }
4871 return WellKnownClasses::java_lang_Thread_systemDaemon->GetBoolean(GetPeer());
4872 }
4873
4874 std::string Thread::StateAndFlagsAsHexString() const {
4875 std::stringstream result_stream;
4876 result_stream << std::hex << GetStateAndFlags(std::memory_order_relaxed).GetValue();
4877 return result_stream.str();
4878 }
4879
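// ScopedExceptionStorage stashes the currently pending exception (if any) on construction and
// clears it, then restores it on destruction. A rough usage sketch:
// `{ ScopedExceptionStorage ses(self); /* run code that may throw and handle its exception */ }`
// leaves the original exception pending again on scope exit; the destructor CHECKs that no other
// exception is still pending at that point, unless SuppressOldException() replaced the stored one.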
4880 ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
4881 : self_(self), hs_(self_), excp_(hs_.NewHandle<art::mirror::Throwable>(self_->GetException())) {
4882 self_->ClearException();
4883 }
4884
4885 void ScopedExceptionStorage::SuppressOldException(const char* message) {
4886 CHECK(self_->IsExceptionPending()) << *self_;
4887 ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
4888 excp_.Assign(self_->GetException());
4889 if (old_suppressed != nullptr) {
4890 LOG(WARNING) << message << " Suppressing old exception: " << old_suppressed->Dump();
4891 }
4892 self_->ClearException();
4893 }
4894
4895 ScopedExceptionStorage::~ScopedExceptionStorage() {
4896 CHECK(!self_->IsExceptionPending()) << *self_;
4897 if (!excp_.IsNull()) {
4898 self_->SetException(excp_.Get());
4899 }
4900 }
4901
4902 } // namespace art
4903
4904 #pragma clang diagnostic pop // -Wconversion
4905