/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <limits.h>  // for INT_MAX
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <atomic>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/file.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"

#include "unwindstack/AndroidUnwinder.h"

#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "base/utils.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "debugger.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_frame_info.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "monitor_objects_stack_visitor.h"
#include "native_stack_dump.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_utf_chars.h"
#include "nterp_helpers.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "palette/palette.h"
#include "quick/quick_method_frame_info.h"
#include "quick_exception_handler.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "reflective_handle_scope-inl.h"
#include "runtime-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_disable_public_sdk_checker.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "trace.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes-inl.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wconversion"

extern "C" __attribute__((weak)) void* __hwasan_tag_pointer(const volatile void* p,
                                                            unsigned char tag);
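// Note: __hwasan_tag_pointer is only provided when building with HWASan; declaring it weak lets
// the stack-mapping code below test it against nullptr at runtime before calling it.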

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ = nullptr;
#ifndef __BIONIC__
thread_local Thread* Thread::self_tls_ = nullptr;
#endif

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;
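// For illustration: with a guard size scale of 1 this reserves 4 KiB; under memory tools the
// kMemoryToolStackGuardSizeScale factor grows the protected region proportionally.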

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints,
                     QuickEntryPoints* qpoints,
                     bool monitor_jni_entry_exit);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(gUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
}

void Thread::InitTlsEntryPoints() {
  ScopedTrace trace("InitTlsEntryPoints");
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  bool monitor_jni_entry_exit = false;
  PaletteShouldReportJniInvocations(&monitor_jni_entry_exit);
  if (monitor_jni_entry_exit) {
    AtomicSetFlag(ThreadFlag::kMonitorJniEntryExit);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints, monitor_jni_entry_exit);
}

void Thread::ResetQuickAllocEntryPointsForThread() {
  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}

class DeoptimizationContextRecord {
 public:
  DeoptimizationContextRecord(const JValue& ret_val,
                              bool is_reference,
                              bool from_code,
                              ObjPtr<mirror::Throwable> pending_exception,
                              DeoptimizationMethodType method_type,
                              DeoptimizationContextRecord* link)
      : ret_val_(ret_val),
        is_reference_(is_reference),
        from_code_(from_code),
        pending_exception_(pending_exception.Ptr()),
        deopt_method_type_(method_type),
        link_(link) {}

  JValue GetReturnValue() const { return ret_val_; }
  bool IsReference() const { return is_reference_; }
  bool GetFromCode() const { return from_code_; }
  ObjPtr<mirror::Throwable> GetPendingException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return pending_exception_;
  }
  DeoptimizationContextRecord* GetLink() const { return link_; }
  mirror::Object** GetReturnValueAsGCRoot() {
    DCHECK(is_reference_);
    return ret_val_.GetGCRoot();
  }
  mirror::Object** GetPendingExceptionAsGCRoot() {
    return reinterpret_cast<mirror::Object**>(&pending_exception_);
  }
  DeoptimizationMethodType GetDeoptimizationMethodType() const {
    return deopt_method_type_;
  }

 private:
  // The value returned by the method at the top of the stack before deoptimization.
  JValue ret_val_;

  // Indicates whether the returned value is a reference. If so, the GC will visit it.
  const bool is_reference_;

  // Whether the context was created from an explicit deoptimization in the code.
  const bool from_code_;

  // The exception that was pending before deoptimization (or null if there was no pending
  // exception).
  mirror::Throwable* pending_exception_;

  // Whether the context was created for an (idempotent) runtime method.
  const DeoptimizationMethodType deopt_method_type_;

  // A link to the previous DeoptimizationContextRecord.
  DeoptimizationContextRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};

class StackedShadowFrameRecord {
 public:
  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
                           StackedShadowFrameType type,
                           StackedShadowFrameRecord* link)
      : shadow_frame_(shadow_frame),
        type_(type),
        link_(link) {}

  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  StackedShadowFrameType GetType() const { return type_; }
  StackedShadowFrameRecord* GetLink() const { return link_; }

 private:
  ShadowFrame* const shadow_frame_;
  const StackedShadowFrameType type_;
  StackedShadowFrameRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};

void Thread::PushDeoptimizationContext(const JValue& return_value,
                                       bool is_reference,
                                       ObjPtr<mirror::Throwable> exception,
                                       bool from_code,
                                       DeoptimizationMethodType method_type) {
  DCHECK(exception != Thread::GetDeoptimizationException());
  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
      return_value,
      is_reference,
      from_code,
      exception,
      method_type,
      tlsPtr_.deoptimization_context_stack);
  tlsPtr_.deoptimization_context_stack = record;
}

void Thread::PopDeoptimizationContext(JValue* result,
                                      ObjPtr<mirror::Throwable>* exception,
                                      bool* from_code,
                                      DeoptimizationMethodType* method_type) {
  AssertHasDeoptimizationContext();
  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
  tlsPtr_.deoptimization_context_stack = record->GetLink();
  result->SetJ(record->GetReturnValue().GetJ());
  *exception = record->GetPendingException();
  *from_code = record->GetFromCode();
  *method_type = record->GetDeoptimizationMethodType();
  delete record;
}

void Thread::AssertHasDeoptimizationContext() {
  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
      << "No deoptimization context for thread " << *this;
}

enum {
  kPermitAvailable = 0,  // Incrementing consumes the permit
  kNoPermit = 1,  // Incrementing marks as waiter waiting
  kNoPermitWaiterWaiting = 2
};
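// These values form a small state machine shared by Park() and Unpark() below: Unpark() stores
// kPermitAvailable, while Park() does a fetch_add(1) that either consumes an available permit
// (0 -> 1) and returns immediately, or records a waiter (1 -> 2) before blocking on the futex.
// This is the native side of java.util.concurrent.locks.LockSupport.park/unpark.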

void Thread::Park(bool is_absolute, int64_t time) {
  DCHECK(this == Thread::Current());
#if ART_USE_FUTEXES
  // Consume the permit, or mark as waiting. This cannot cause park_state to go
  // outside of its valid range (0, 1, 2), because in all cases where 2 is
  // assigned it is set back to 1 before returning, and this method cannot run
  // concurrently with itself since it operates on the current thread.
  int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
  if (old_state == kNoPermit) {
    // no permit was available. block thread until later.
    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
    bool timed_out = false;
    if (!is_absolute && time == 0) {
      // Thread.getState() is documented to return waiting for untimed parks.
      ScopedThreadSuspension sts(this, ThreadState::kWaiting);
      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
      int result = futex(tls32_.park_state_.Address(),
                         FUTEX_WAIT_PRIVATE,
                         /* sleep if val = */ kNoPermitWaiterWaiting,
                         /* timeout */ nullptr,
                         nullptr,
                         0);
      // This errno check must happen before the scope is closed, to ensure that
      // no destructors (such as ScopedThreadSuspension) overwrite errno.
      if (result == -1) {
        switch (errno) {
          case EAGAIN:
            FALLTHROUGH_INTENDED;
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    } else if (time > 0) {
      // Only actually suspend and futex_wait if we're going to wait for some
      // positive amount of time - the kernel will reject negative times with
      // EINVAL, and a zero time will just noop.

      // Thread.getState() is documented to return timed wait for timed parks.
      ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
      timespec timespec;
      int result = 0;
      if (is_absolute) {
        // Time is millis when scheduled for an absolute time
        timespec.tv_nsec = (time % 1000) * 1000000;
        timespec.tv_sec = SaturatedTimeT(time / 1000);
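        // For example, an absolute deadline of 1'700'000'000'123 ms splits into
        // tv_sec = 1'700'000'000 and tv_nsec = 123'000'000.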
        // This odd looking pattern is recommended by futex documentation to
        // wait until an absolute deadline, with otherwise identical behavior to
        // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
        // correct time when the system clock changes.
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       static_cast<int>(FUTEX_BITSET_MATCH_ANY));
      } else {
        // Time is nanos when scheduled for a relative time
        timespec.tv_sec = SaturatedTimeT(time / 1000000000);
        timespec.tv_nsec = time % 1000000000;
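        // For example, a relative timeout of 2'500'000'000 ns splits into
        // tv_sec = 2 and tv_nsec = 500'000'000.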
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_PRIVATE,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       0);
      }
      // This errno check must happen before the scope is closed, to ensure that
      // no destructors (such as ScopedThreadSuspension) overwrite errno.
      if (result == -1) {
        switch (errno) {
          case ETIMEDOUT:
            timed_out = true;
            FALLTHROUGH_INTENDED;
          case EAGAIN:
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    }
    // Mark as no longer waiting, and consume permit if there is one.
    tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
    // TODO: Call to signal jvmti here
    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
  } else {
    // the fetch_add has consumed the permit. immediately return.
    DCHECK_EQ(old_state, kPermitAvailable);
  }
#else
#pragma clang diagnostic push
#pragma clang diagnostic warning "-W#warnings"
#warning "LockSupport.park/unpark implemented as noops without FUTEX support."
#pragma clang diagnostic pop
  UNUSED(is_absolute, time);
  UNIMPLEMENTED(WARNING);
  sched_yield();
#endif
}

void Thread::Unpark() {
#if ART_USE_FUTEXES
  // Set permit available; will be consumed either by fetch_add (when the thread
  // tries to park) or store (when the parked thread is woken up)
  if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
      == kNoPermitWaiterWaiting) {
    int result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAKE_PRIVATE,
                       /* number of waiters = */ 1,
                       nullptr,
                       nullptr,
                       0);
    if (result == -1) {
      PLOG(FATAL) << "Failed to unpark";
    }
  }
#else
  UNIMPLEMENTED(WARNING);
#endif
}

void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
      sf, type, tlsPtr_.stacked_shadow_frame_record);
  tlsPtr_.stacked_shadow_frame_record = record;
}

ShadowFrame* Thread::MaybePopDeoptimizedStackedShadowFrame() {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  if (record == nullptr ||
      record->GetType() != StackedShadowFrameType::kDeoptimizationShadowFrame) {
    return nullptr;
  }
  return PopStackedShadowFrame();
}

ShadowFrame* Thread::PopStackedShadowFrame() {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  DCHECK_NE(record, nullptr);
  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
  ShadowFrame* shadow_frame = record->GetShadowFrame();
  delete record;
  return shadow_frame;
}

class FrameIdToShadowFrame {
 public:
  static FrameIdToShadowFrame* Create(size_t frame_id,
                                      ShadowFrame* shadow_frame,
                                      FrameIdToShadowFrame* next,
                                      size_t num_vregs) {
    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
  }

  static void Delete(FrameIdToShadowFrame* f) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
    delete[] memory;
  }

  size_t GetFrameId() const { return frame_id_; }
  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  FrameIdToShadowFrame* GetNext() const { return next_; }
  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
  bool* GetUpdatedVRegFlags() {
    return updated_vreg_flags_;
  }

 private:
  FrameIdToShadowFrame(size_t frame_id,
                       ShadowFrame* shadow_frame,
                       FrameIdToShadowFrame* next)
      : frame_id_(frame_id),
        shadow_frame_(shadow_frame),
        next_(next) {}

  const size_t frame_id_;
  ShadowFrame* const shadow_frame_;
  FrameIdToShadowFrame* next_;
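  // Zero-length trailing array idiom: Create() allocates room for one bool per vreg directly
  // behind this record and constructs it there with placement new, so the flags share the
  // record's single allocation.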
  bool updated_vreg_flags_[0];

  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
};

static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
                                                      size_t frame_id) {
  FrameIdToShadowFrame* found = nullptr;
  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      if (kIsDebugBuild) {
        // Check we have at most one record for this frame.
        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
        found = record;
      } else {
        return record;
      }
    }
  }
  return found;
}

ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  if (record != nullptr) {
    return record->GetShadowFrame();
  }
  return nullptr;
}

// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  CHECK(record != nullptr);
  return record->GetUpdatedVRegFlags();
}

ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                                     uint32_t num_vregs,
                                                     ArtMethod* method,
                                                     uint32_t dex_pc) {
  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    return shadow_frame;
  }
  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc);
  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
                                                              shadow_frame,
                                                              tlsPtr_.frame_id_to_shadow_frame,
                                                              num_vregs);
  for (uint32_t i = 0; i < num_vregs; i++) {
    // Do this to clear all references for root visitors.
    shadow_frame->SetVRegReference(i, nullptr);
    // This flag will be changed to true if the debugger modifies the value.
    record->GetUpdatedVRegFlags()[i] = false;
  }
  tlsPtr_.frame_id_to_shadow_frame = record;
  return shadow_frame;
}

TLSData* Thread::GetCustomTLS(const char* key) {
  MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
  auto it = custom_tls_.find(key);
  return (it != custom_tls_.end()) ? it->second.get() : nullptr;
}

void Thread::SetCustomTLS(const char* key, TLSData* data) {
  // We will swap the old data (which might be nullptr) with this and then delete it outside of the
  // custom_tls_lock_.
  std::unique_ptr<TLSData> old_data(data);
  {
    MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
    custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data);
  }
}

void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
  if (head->GetFrameId() == frame_id) {
    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
    FrameIdToShadowFrame::Delete(head);
    return;
  }
  FrameIdToShadowFrame* prev = head;
  for (FrameIdToShadowFrame* record = head->GetNext();
       record != nullptr;
       prev = record, record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      prev->SetNext(record->GetNext());
      FrameIdToShadowFrame::Delete(record);
      return;
    }
  }
  LOG(FATAL) << "No shadow frame for frame " << frame_id;
  UNREACHABLE();
}

void Thread::InitTid() {
  tls32_.tid = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void Thread::DeleteJPeer(JNIEnv* env) {
  // Make sure nothing can observe both opeer and jpeer set at the same time.
  jobject old_jpeer = tlsPtr_.jpeer;
  CHECK(old_jpeer != nullptr);
  tlsPtr_.jpeer = nullptr;
  env->DeleteGlobalRef(old_jpeer);
}

void* Thread::CreateCallbackWithUffdGc(void* arg) {
  return Thread::CreateCallback(arg);
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return nullptr;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    // after self->Init().
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDownLocked());
    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
    // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
    // the runtime in such a case. In case this ever changes, we need to make sure here to
    // delete the tmp_jni_env, as we own it at this point.
    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
    self->tlsPtr_.tmp_jni_env = nullptr;
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    self->InitStringEntryPoints();

    // Copy peer into self, deleting global reference when done.
    CHECK(self->tlsPtr_.jpeer != nullptr);
    self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
    // Make sure nothing can observe both opeer and jpeer set at the same time.
    self->DeleteJPeer(self->GetJniEnv());
    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());

    ArtField* priorityField = WellKnownClasses::java_lang_Thread_priority;
    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));

    runtime->GetRuntimeCallbacks()->ThreadStart(self);

    // Unpark ourselves if the java peer was unparked before it started (see
    // b/28845097#comment49 for more information)

    ArtField* unparkedField = WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
    bool should_unpark = false;
    {
      // Hold the lock here, so that if another thread calls unpark before the thread starts
      // we don't observe the unparkedBeforeStart field before the unparker writes to it,
      // which could cause a lost unpark.
      art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
      should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
    }
    if (should_unpark) {
      self->Unpark();
    }
    // Invoke the 'run' method of our java.lang.Thread.
    ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
    WellKnownClasses::java_lang_Thread_run->InvokeVirtual<'V'>(self, receiver);
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self, /* should_run_callbacks= */ true);

  return nullptr;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  ObjPtr<mirror::Object> thread_peer) {
  ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
  Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
  // Check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != nullptr && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // Under sanitization, frames of the interpreter may become bigger, both for C code as
  // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization
  // of all core classes cannot be done in all test circumstances.
  if (kMemoryToolIsAvailable) {
    stack_size = std::max(2 * MB, stack_size);
  }

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (Runtime::Current()->GetImplicitStackOverflowChecks()) {
    // If we are going to use implicit stack checks, allocate space for the protected
    // region at the bottom of the stack.
    stack_size += Thread::kStackOverflowImplicitCheckSize +
        GetStackOverflowReservedBytes(kRuntimeISA);
  } else {
    // It's likely that callers are trying to ensure they have at least a certain amount of
    // stack space, so we should add our reserved space on top of what they requested, rather
    // than implicitly take it away from them.
    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

// Return the nearest page-aligned address below the current stack top.
NO_INLINE
static uint8_t* FindStackTop() {
  return reinterpret_cast<uint8_t*>(
      AlignDown(__builtin_frame_address(0), kPageSize));
}

// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  // Page containing current top of stack.
  uint8_t* stack_top = FindStackTop();

  // Try to directly protect the stack.
  VLOG(threads) << "installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
  if (ProtectStack(/* fatal_on_error= */ false)) {
    // Tell the kernel that we won't be needing these pages any more.
    // NB. madvise will probably write zeroes into the memory (on linux it does).
    size_t unwanted_size =
        reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
    madvise(pregion, unwanted_size, MADV_DONTNEED);
    return;
  }

  // There is a little complexity here that deserves a special mention. On some
  // architectures, the stack is created using a VM_GROWSDOWN flag
  // to prevent memory being allocated when it's not needed. This flag makes the
  // kernel only allocate memory for the stack by growing down in memory. Because we
  // want to put an mprotected region far away from that at the stack top, we need
  // to make sure the pages for the stack are mapped in before we call mprotect.
  //
  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
  // with a non-mapped stack (usually only the main thread).
  //
  // We map in the stack by reading every page from the stack bottom (highest address)
  // to the stack top. (We then madvise this away.) This must be done by reading from the
  // current stack pointer downwards.
  //
  // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
  // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
  // thus have to move the stack pointer. We do this portably by using a recursive function with a
  // large stack frame size.

  // (Defensively) first remove the protection on the protected region as we'll want to read
  // and write it. Ignore errors.
  UnprotectStack();

  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
      static_cast<void*>(pregion);

  struct RecurseDownStack {
    // This function has an intentionally large stack size.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
    NO_INLINE
    __attribute__((no_sanitize("memtag"))) static void Touch(uintptr_t target) {
      volatile size_t zero = 0;
      // Use a large local volatile array to ensure a large frame size. Do not use anything close
      // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
      // there is no pragma support for this.
      // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
      constexpr size_t kAsanMultiplier =
#ifdef ADDRESS_SANITIZER
          2u;
#else
          1u;
#endif
      // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
      // auto-initialize this local variable).
      volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
      char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
      // Remove tag from the pointer. Nop in non-hwasan builds.
      uintptr_t addr = reinterpret_cast<uintptr_t>(
          __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
      if (addr >= target + kPageSize) {
        Touch(target);
      }
      zero *= 2;  // Try to avoid tail recursion.
    }
#pragma GCC diagnostic pop
  };
  RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));

  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);

  // Protect the bottom of the stack to prevent read/write to it.
  ProtectStack(/* fatal_on_error= */ true);

  // Tell the kernel that we won't be needing these pages any more.
  // NB. madvise will probably write zeroes into the memory (on linux it does).
  size_t unwanted_size =
      reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
  madvise(pregion, unwanted_size, MADV_DONTNEED);
}

template <bool kSupportTransaction>
static void SetNativePeer(ObjPtr<mirror::Object> java_peer, Thread* thread)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = WellKnownClasses::java_lang_Thread_nativePeer;
  if (kSupportTransaction && Runtime::Current()->IsActiveTransaction()) {
    field->SetLong</*kTransactionActive=*/ true>(java_peer, reinterpret_cast<jlong>(thread));
  } else {
    field->SetLong</*kTransactionActive=*/ false>(java_peer, reinterpret_cast<jlong>(thread));
  }
}

static void SetNativePeer(JNIEnv* env, jobject java_peer, Thread* thread) {
  ScopedObjectAccess soa(env);
  SetNativePeer</*kSupportTransaction=*/ false>(soa.Decode<mirror::Object>(java_peer), thread);
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != nullptr);
  Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();

  if (VLOG_IS_ON(threads)) {
    ScopedObjectAccess soa(env);

    ArtField* f = WellKnownClasses::java_lang_Thread_name;
    ObjPtr<mirror::String> java_name =
        f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
    std::string thread_name;
    if (java_name != nullptr) {
      thread_name = java_name->ToModifiedUtf8();
    } else {
      thread_name = "(Unnamed)";
    }

    VLOG(threads) << "Creating native thread for " << thread_name;
    self->Dump(LOG_STREAM(INFO));
  }

  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
  // to assign it.
  SetNativePeer(env, java_peer, child_thread);

  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
  // do not have a good way to report this on the child's side.
  std::string error_msg;
  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));

  int pthread_create_result = 0;
  if (child_jni_env_ext.get() != nullptr) {
    pthread_t new_pthread;
    pthread_attr_t attr;
    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
    pthread_create_result = pthread_create(&new_pthread,
                                           &attr,
                                           gUseUserfaultfd ? Thread::CreateCallbackWithUffdGc
                                                           : Thread::CreateCallback,
                                           child_thread);
    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

    if (pthread_create_result == 0) {
      // pthread_create started the new thread. The child is now responsible for managing the
      // JNIEnvExt we created.
      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
      // between the threads.
      child_jni_env_ext.release();  // NOLINT pthreads API.
      return;
    }
  }

  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    runtime->EndThreadBirth();
  }
  // Manually delete the global reference since Thread::Init will not have been run. Make sure
  // nothing can observe both opeer and jpeer set at the same time.
  child_thread->DeleteJPeer(env);
  delete child_thread;
  child_thread = nullptr;
  // TODO: remove from thread group?
  SetNativePeer(env, java_peer, nullptr);
  {
    std::string msg(child_jni_env_ext.get() == nullptr ?
        StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
        StringPrintf("pthread_create (%s stack) failed: %s",
                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
    ScopedObjectAccess soa(env);
    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
  }
}

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == nullptr);

  // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  tlsPtr_.pthread_self = pthread_self();
  CHECK(is_started_);

  ScopedTrace trace("Thread::Init");

  SetUpAlternateSignalStack();
  if (!InitStackHwm()) {
    return false;
  }
  InitCpu();
  InitTlsEntryPoints();
  RemoveSuspendTrigger();
  InitCardTable();
  InitTid();

#ifdef __BIONIC__
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  Thread::self_tls_ = this;
#endif
  DCHECK_EQ(Thread::Current(), this);

  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);

  if (jni_env_ext != nullptr) {
    DCHECK_EQ(jni_env_ext->GetVm(), java_vm);
    DCHECK_EQ(jni_env_ext->GetSelf(), this);
    tlsPtr_.jni_env = jni_env_ext;
  } else {
    std::string error_msg;
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
    if (tlsPtr_.jni_env == nullptr) {
      LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
      return false;
    }
  }

  ScopedTrace trace3("ThreadList::Register");
  thread_list->Register(this);
  return true;
}

template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       PeerAction peer_action,
                       bool should_run_callbacks) {
  Runtime* runtime = Runtime::Current();
  ScopedTrace trace("Thread::Attach");
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
        ((thread_name != nullptr) ? thread_name : "(Unnamed)");
    return nullptr;
  }
  Thread* self;
  {
    ScopedTrace trace2("Thread birth");
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
          ((thread_name != nullptr) ? thread_name : "(Unnamed)");
      return nullptr;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
    }
  }

  self->InitStringEntryPoints();

  CHECK_NE(self->GetState(), ThreadState::kRunnable);
  self->SetState(ThreadState::kNative);

  // Run the action that is acting on the peer.
  if (!peer_action(self)) {
    runtime->GetThreadList()->Unregister(self, should_run_callbacks);
    // Unregister deletes self, no need to do this here.
    return nullptr;
  }

  if (VLOG_IS_ON(threads)) {
    if (thread_name != nullptr) {
      VLOG(threads) << "Attaching thread " << thread_name;
    } else {
      VLOG(threads) << "Attaching unnamed thread.";
    }
    ScopedObjectAccess soa(self);
    self->Dump(LOG_STREAM(INFO));
  }

  if (should_run_callbacks) {
    ScopedObjectAccess soa(self);
    runtime->GetRuntimeCallbacks()->ThreadStart(self);
  }

  return self;
}

Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       jobject thread_group,
                       bool create_peer,
                       bool should_run_callbacks) {
  auto create_peer_action = [&](Thread* self) {
    // If we're the main thread, ClassLinker won't be created until after we're attached,
    // so that thread needs a two-stage attach. Regular threads don't need this hack.
    // In the compiler, all threads need this hack, because no-one's going to be getting
    // a native peer!
    if (create_peer) {
      self->CreatePeer(thread_name, as_daemon, thread_group);
      if (self->IsExceptionPending()) {
        // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
        // the failure but do not dump the exception details. If we fail to allocate the peer, we
        // usually also fail to allocate an exception object and throw a pre-allocated OOME without
        // any useful information. If we do manage to allocate the exception object, the memory
        // information in the message could have been collected too late and therefore misleading.
        {
          ScopedObjectAccess soa(self);
          LOG(ERROR) << "Exception creating thread peer: "
                     << ((thread_name != nullptr) ? thread_name : "<null>");
          self->ClearException();
        }
        return false;
      }
    } else {
      // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
      if (thread_name != nullptr) {
        self->SetCachedThreadName(thread_name);
        ::art::SetThreadName(thread_name);
      } else if (self->GetJniEnv()->IsCheckJniEnabled()) {
        LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
      }
    }
    return true;
  };
  return Attach(thread_name, as_daemon, create_peer_action, should_run_callbacks);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
  auto set_peer_action = [&](Thread* self) {
    // Install the given peer.
    DCHECK(self == Thread::Current());
    ScopedObjectAccess soa(self);
    ObjPtr<mirror::Object> peer = soa.Decode<mirror::Object>(thread_peer);
    self->tlsPtr_.opeer = peer.Ptr();
    SetNativePeer</*kSupportTransaction=*/ false>(peer, self);
    return true;
  };
  return Attach(thread_name, as_daemon, set_peer_action, /* should_run_callbacks= */ true);
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  ScopedObjectAccess soa(self);
  StackHandleScope<4u> hs(self);
  DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
  Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
      thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
  Handle<mirror::String> thread_name = hs.NewHandle(
      name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
    CHECK(self->IsExceptionPending());
    return;
  }
  jint thread_priority = GetNativePriority();

  DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
  Handle<mirror::Object> peer =
      hs.NewHandle(WellKnownClasses::java_lang_Thread->AllocObject(self));
  if (UNLIKELY(peer == nullptr)) {
    CHECK(IsExceptionPending());
    return;
  }
  tlsPtr_.opeer = peer.Get();
  WellKnownClasses::java_lang_Thread_init->InvokeInstance<'V', 'L', 'L', 'I', 'Z'>(
      self, peer.Get(), thr_group.Get(), thread_name.Get(), thread_priority, as_daemon);
  if (self->IsExceptionPending()) {
    return;
  }

  SetNativePeer</*kSupportTransaction=*/ false>(peer.Get(), self);

  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
  if (peer_thread_name == nullptr) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    if (runtime->IsActiveTransaction()) {
      InitPeer<true>(tlsPtr_.opeer,
                     as_daemon,
                     thr_group.Get(),
                     thread_name.Get(),
                     thread_priority);
    } else {
      InitPeer<false>(tlsPtr_.opeer,
                      as_daemon,
                      thr_group.Get(),
                      thread_name.Get(),
                      thread_priority);
    }
    peer_thread_name.Assign(GetThreadName());
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name != nullptr) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

ObjPtr<mirror::Object> Thread::CreateCompileTimePeer(const char* name,
                                                     bool as_daemon,
                                                     jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(!runtime->IsStarted());
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  ScopedObjectAccessUnchecked soa(self);
  StackHandleScope<3u> hs(self);
  DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
  Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
      thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
  Handle<mirror::String> thread_name = hs.NewHandle(
      name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
    CHECK(self->IsExceptionPending());
    return nullptr;
  }
  jint thread_priority = kNormThreadPriority;  // Always normalize to NORM priority.

  DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
  Handle<mirror::Object> peer = hs.NewHandle(
      WellKnownClasses::java_lang_Thread->AllocObject(self));
  if (peer == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }

  // We cannot call Thread.init, as it will recursively ask for currentThread.

  // The Thread constructor should have set the Thread.name to a
  // non-null value. However, because we can run without code
  // available (in the compiler, in tests), we manually assign the
  // fields the constructor should have set.
  if (runtime->IsActiveTransaction()) {
    InitPeer<true>(peer.Get(),
                   as_daemon,
                   thr_group.Get(),
                   thread_name.Get(),
                   thread_priority);
  } else {
    InitPeer<false>(peer.Get(),
                    as_daemon,
                    thr_group.Get(),
                    thread_name.Get(),
                    thread_priority);
  }

  return peer.Get();
}
1240
1241 template<bool kTransactionActive>
InitPeer(ObjPtr<mirror::Object> peer,bool as_daemon,ObjPtr<mirror::Object> thread_group,ObjPtr<mirror::String> thread_name,jint thread_priority)1242 void Thread::InitPeer(ObjPtr<mirror::Object> peer,
1243 bool as_daemon,
1244 ObjPtr<mirror::Object> thread_group,
1245 ObjPtr<mirror::String> thread_name,
1246 jint thread_priority) {
1247 WellKnownClasses::java_lang_Thread_daemon->SetBoolean<kTransactionActive>(peer,
1248 static_cast<uint8_t>(as_daemon ? 1u : 0u));
1249 WellKnownClasses::java_lang_Thread_group->SetObject<kTransactionActive>(peer, thread_group);
1250 WellKnownClasses::java_lang_Thread_name->SetObject<kTransactionActive>(peer, thread_name);
1251 WellKnownClasses::java_lang_Thread_priority->SetInt<kTransactionActive>(peer, thread_priority);
1252 }
1253
SetCachedThreadName(const char * name)1254 void Thread::SetCachedThreadName(const char* name) {
1255 DCHECK(name != kThreadNameDuringStartup);
1256 const char* old_name = tlsPtr_.name.exchange(name == nullptr ? nullptr : strdup(name));
1257 if (old_name != nullptr && old_name != kThreadNameDuringStartup) {
1258 // Deallocate it, carefully. Note that the load has to be ordered wrt the store of the xchg.
1259 for (uint32_t i = 0; UNLIKELY(tls32_.num_name_readers.load(std::memory_order_seq_cst) != 0);
1260 ++i) {
1261 static constexpr uint32_t kNumSpins = 1000;
1262 // Ugly, but keeps us from having to do anything on the reader side.
1263 if (i > kNumSpins) {
1264 usleep(500);
1265 }
1266 }
1267 // We saw the reader count drop to zero since we replaced the name; old one is now safe to
1268 // deallocate.
1269 free(const_cast<char *>(old_name));
1270 }
1271 }
1272
SetThreadName(const char * name)1273 void Thread::SetThreadName(const char* name) {
1274 SetCachedThreadName(name);
1275 ::art::SetThreadName(name);
1276 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
1277 }
1278
GetThreadStack(pthread_t thread,void ** stack_base,size_t * stack_size,size_t * guard_size)1279 static void GetThreadStack(pthread_t thread,
1280 void** stack_base,
1281 size_t* stack_size,
1282 size_t* guard_size) {
1283 #if defined(__APPLE__)
1284 *stack_size = pthread_get_stacksize_np(thread);
1285 void* stack_addr = pthread_get_stackaddr_np(thread);
1286
1287 // Check whether stack_addr is the base or end of the stack.
1288 // (On Mac OS 10.7, it's the end.)
1289 int stack_variable;
1290 if (stack_addr > &stack_variable) {
1291 *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
1292 } else {
1293 *stack_base = stack_addr;
1294 }
1295
1296 // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
1297 pthread_attr_t attributes;
1298 CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
1299 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
1300 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
1301 #else
1302 pthread_attr_t attributes;
1303 CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
1304 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
1305 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
1306 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
1307
1308 #if defined(__GLIBC__)
1309 // If we're the main thread, check whether we were run with an unlimited stack. In that case,
1310 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
1311 // will be broken because we'll die long before we get close to 2GB.
1312 bool is_main_thread = (::art::GetTid() == static_cast<uint32_t>(getpid()));
1313 if (is_main_thread) {
1314 rlimit stack_limit;
1315 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
1316 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
1317 }
1318 if (stack_limit.rlim_cur == RLIM_INFINITY) {
1319 size_t old_stack_size = *stack_size;
1320
1321 // Use the kernel default limit as our size, and adjust the base to match.
1322 *stack_size = 8 * MB;
1323 *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
1324
1325 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
1326 << " to " << PrettySize(*stack_size)
1327 << " with base " << *stack_base;
1328 }
1329 }
1330 #endif
1331
1332 #endif
1333 }
1334
InitStackHwm()1335 bool Thread::InitStackHwm() {
1336 ScopedTrace trace("InitStackHwm");
1337 void* read_stack_base;
1338 size_t read_stack_size;
1339 size_t read_guard_size;
1340 GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
1341
1342 tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
1343 tlsPtr_.stack_size = read_stack_size;
1344
1345 // The minimum stack size we can cope with is the overflow reserved bytes (typically
1346 // 8K) + the protected region size (4K) + another page (4K). Typically this will
1347 // be 8+4+4 = 16K. The thread won't be able to do much with this stack even the GC takes
1348 // between 8K and 12K.
1349 uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
1350 + 4 * KB;
1351 if (read_stack_size <= min_stack) {
1352 // Note, as we know the stack is small, avoid operations that could use a lot of stack.
1353 LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
1354 __LINE__,
1355 ::android::base::ERROR,
1356 "Attempt to attach a thread with a too-small stack");
1357 return false;
1358 }
1359
1360 // This is included in the SIGQUIT output, but it's useful here for thread debugging.
1361 VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
1362 read_stack_base,
1363 PrettySize(read_stack_size).c_str(),
1364 PrettySize(read_guard_size).c_str());
1365
1366 // Set stack_end_ to the bottom of the stack saving space of stack overflows
1367
1368 Runtime* runtime = Runtime::Current();
1369 bool implicit_stack_check =
1370 runtime->GetImplicitStackOverflowChecks() && !runtime->IsAotCompiler();
1371
1372 ResetDefaultStackEnd();
1373
1374 // Install the protected region if we are doing implicit overflow checks.
1375 if (implicit_stack_check) {
1376 // The thread might already have a protected region at the bottom. We need
1377 // to install our own region, so we move the limits
1378 // of the stack to make room for it.
1379
1380 tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
1381 tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
1382 tlsPtr_.stack_size -= read_guard_size + kStackOverflowProtectedSize;
1383
1384 InstallImplicitProtection();
1385 }
1386
1387 // Consistency check.
1388 CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));
1389
1390 return true;
1391 }
1392
1393 void Thread::ShortDump(std::ostream& os) const {
1394 os << "Thread[";
1395 if (GetThreadId() != 0) {
1396 // If we're in kStarting, we won't have a thin lock id or tid yet.
1397 os << GetThreadId()
1398 << ",tid=" << GetTid() << ',';
1399 }
1400 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1401 const char* name = tlsPtr_.name.load();
1402 os << GetState()
1403 << ",Thread*=" << this
1404 << ",peer=" << tlsPtr_.opeer
1405 << ",\"" << (name == nullptr ? "null" : name) << "\""
1406 << "]";
1407 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1408 }
1409
1410 Thread::DumpOrder Thread::Dump(std::ostream& os,
1411 bool dump_native_stack,
1412 bool force_dump_stack) const {
1413 DumpState(os);
1414 return DumpStack(os, dump_native_stack, force_dump_stack);
1415 }
1416
1417 Thread::DumpOrder Thread::Dump(std::ostream& os,
1418 unwindstack::AndroidLocalUnwinder& unwinder,
1419 bool dump_native_stack,
1420 bool force_dump_stack) const {
1421 DumpState(os);
1422 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
1423 }
1424
1425 ObjPtr<mirror::String> Thread::GetThreadName() const {
1426 if (tlsPtr_.opeer == nullptr) {
1427 return nullptr;
1428 }
1429 ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(tlsPtr_.opeer);
1430 return name == nullptr ? nullptr : name->AsString();
1431 }
1432
1433 void Thread::GetThreadName(std::string& name) const {
1434 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1435 // The store part of the increment has to be ordered with respect to the following load.
1436 name.assign(tlsPtr_.name.load(std::memory_order_seq_cst));
1437 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1438 }
1439
1440 uint64_t Thread::GetCpuMicroTime() const {
1441 #if defined(__linux__)
1442 clockid_t cpu_clock_id;
1443 pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
1444 timespec now;
1445 clock_gettime(cpu_clock_id, &now);
1446 return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
1447 static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
1448 #else // __APPLE__
1449 UNIMPLEMENTED(WARNING);
1450 return -1;
1451 #endif
1452 }
1453
1454 // Attempt to rectify locks so that we dump the thread list with the required locks held before exiting.
1455 static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
1456 LOG(ERROR) << *thread << " suspend count already zero.";
1457 Locks::thread_suspend_count_lock_->Unlock(self);
1458 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1459 Locks::mutator_lock_->SharedTryLock(self);
1460 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1461 LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
1462 }
1463 }
1464 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1465 Locks::thread_list_lock_->TryLock(self);
1466 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1467 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
1468 }
1469 }
1470 std::ostringstream ss;
1471 Runtime::Current()->GetThreadList()->Dump(ss);
1472 LOG(FATAL) << ss.str();
1473 }
1474
1475 bool Thread::ModifySuspendCountInternal(Thread* self,
1476 int delta,
1477 AtomicInteger* suspend_barrier,
1478 SuspendReason reason) {
1479 if (kIsDebugBuild) {
1480 DCHECK(delta == -1 || delta == +1)
1481 << reason << " " << delta << " " << this;
1482 Locks::thread_suspend_count_lock_->AssertHeld(self);
1483 if (this != self && !IsSuspended()) {
1484 Locks::thread_list_lock_->AssertHeld(self);
1485 }
1486 }
1487 // User code suspensions need to be checked more closely since they originate from code outside of
1488 // the runtime's control.
1489 if (UNLIKELY(reason == SuspendReason::kForUserCode)) {
1490 Locks::user_code_suspension_lock_->AssertHeld(self);
1491 if (UNLIKELY(delta + tls32_.user_code_suspend_count < 0)) {
1492 LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
1493 return false;
1494 }
1495 }
1496 if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
1497 UnsafeLogFatalForSuspendCount(self, this);
1498 return false;
1499 }
1500
1501 if (delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
1502 // Force retry of a suspend request if it's in the middle of a thread flip to avoid a
1503 // deadlock. b/31683379.
1504 return false;
1505 }
1506
1507 uint32_t flags = enum_cast<uint32_t>(ThreadFlag::kSuspendRequest);
1508 if (delta > 0 && suspend_barrier != nullptr) {
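// Find a free slot for the new barrier in the fixed-size active_suspend_barriers array.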
1509 uint32_t available_barrier = kMaxSuspendBarriers;
1510 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1511 if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
1512 available_barrier = i;
1513 break;
1514 }
1515 }
1516 if (available_barrier == kMaxSuspendBarriers) {
1517 // No barrier spaces available, we can't add another.
1518 return false;
1519 }
1520 tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
1521 flags |= enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier);
1522 }
1523
1524 tls32_.suspend_count += delta;
1525 switch (reason) {
1526 case SuspendReason::kForUserCode:
1527 tls32_.user_code_suspend_count += delta;
1528 break;
1529 case SuspendReason::kInternal:
1530 break;
1531 }
1532
1533 if (tls32_.suspend_count == 0) {
1534 AtomicClearFlag(ThreadFlag::kSuspendRequest);
1535 } else {
1536 // Two bits might be set simultaneously.
1537 tls32_.state_and_flags.fetch_or(flags, std::memory_order_seq_cst);
1538 TriggerSuspend();
1539 }
1540 return true;
1541 }
1542
1543 bool Thread::PassActiveSuspendBarriers(Thread* self) {
1544 // Grab the suspend_count lock and copy the current set of
1545 // barriers. Then clear the list and the flag. The ModifySuspendCount
1546 // function requires the lock so we prevent a race between setting
1547 // the kActiveSuspendBarrier flag and clearing it.
1548 AtomicInteger* pass_barriers[kMaxSuspendBarriers];
1549 {
1550 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1551 if (!ReadFlag(ThreadFlag::kActiveSuspendBarrier)) {
1552 // Quick exit test: the barriers have already been claimed - this is
1553 // possible as there may be a race to claim them and it doesn't matter
1554 // who wins.
1555 // All of the callers of this function (except SuspendAllInternal)
1556 // will first test the kActiveSuspendBarrier flag without the lock. Here we
1557 // double-check whether the barrier has been passed while holding the
1558 // suspend_count lock.
1559 return false;
1560 }
1561
1562 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1563 pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
1564 tlsPtr_.active_suspend_barriers[i] = nullptr;
1565 }
1566 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1567 }
1568
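// Each claimed barrier counts the threads that still need to pass it. Decrement each one and,
// when a count reaches zero, wake any waiters (via a futex when ART_USE_FUTEXES is set).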
1569 uint32_t barrier_count = 0;
1570 for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
1571 AtomicInteger* pending_threads = pass_barriers[i];
1572 if (pending_threads != nullptr) {
1573 bool done = false;
1574 do {
1575 int32_t cur_val = pending_threads->load(std::memory_order_relaxed);
1576 CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
1577 // Reduce value by 1.
1578 done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
1579 #if ART_USE_FUTEXES
1580 if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously.
1581 futex(pending_threads->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
1582 }
1583 #endif
1584 } while (!done);
1585 ++barrier_count;
1586 }
1587 }
1588 CHECK_GT(barrier_count, 0U);
1589 return true;
1590 }
1591
1592 void Thread::ClearSuspendBarrier(AtomicInteger* target) {
1593 CHECK(ReadFlag(ThreadFlag::kActiveSuspendBarrier));
1594 bool clear_flag = true;
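// Remove `target` from the active barrier list; only clear kActiveSuspendBarrier if no other
// barrier is still pending.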
1595 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1596 AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
1597 if (ptr == target) {
1598 tlsPtr_.active_suspend_barriers[i] = nullptr;
1599 } else if (ptr != nullptr) {
1600 clear_flag = false;
1601 }
1602 }
1603 if (LIKELY(clear_flag)) {
1604 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1605 }
1606 }
1607
1608 void Thread::RunCheckpointFunction() {
1609 // If this thread is suspended and another thread is running the checkpoint on its behalf,
1610 // we may have a pending flip function that we need to run for the sake of those checkpoints
1611 // that need to walk the stack. We should not see the flip function flags when the thread
1612 // is running the checkpoint on its own.
1613 StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1614 if (UNLIKELY(state_and_flags.IsAnyOfFlagsSet(FlipFunctionFlags()))) {
1615 DCHECK(IsSuspended());
1616 Thread* self = Thread::Current();
1617 DCHECK(self != this);
1618 if (state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) {
1619 EnsureFlipFunctionStarted(self);
1620 state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1621 DCHECK(!state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
1622 }
1623 if (state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) {
1624 WaitForFlipFunction(self);
1625 }
1626 }
1627
1628 // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If
1629 // there are no more checkpoints we will also clear the kCheckpointRequest flag.
1630 Closure* checkpoint;
1631 {
1632 MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1633 checkpoint = tlsPtr_.checkpoint_function;
1634 if (!checkpoint_overflow_.empty()) {
1635 // Overflow list not empty, copy the first one out and continue.
1636 tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
1637 checkpoint_overflow_.pop_front();
1638 } else {
1639 // No overflow checkpoints. Clear the kCheckpointRequest flag.
1640 tlsPtr_.checkpoint_function = nullptr;
1641 AtomicClearFlag(ThreadFlag::kCheckpointRequest);
1642 }
1643 }
1644 // Outside the lock, run the checkpoint function.
1645 ScopedTrace trace("Run checkpoint function");
1646 CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint";
1647 checkpoint->Run(this);
1648 }
1649
1650 void Thread::RunEmptyCheckpoint() {
1651 // Note: Empty checkpoint does not access the thread's stack,
1652 // so we do not need to check for the flip function.
1653 DCHECK_EQ(Thread::Current(), this);
1654 AtomicClearFlag(ThreadFlag::kEmptyCheckpointRequest);
1655 Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
1656 }
1657
1658 bool Thread::RequestCheckpoint(Closure* function) {
1659 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1660 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1661 return false; // Fail, thread is suspended and so can't run a checkpoint.
1662 }
1663
1664 // We must be runnable to request a checkpoint.
1665 DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
1666 StateAndFlags new_state_and_flags = old_state_and_flags;
1667 new_state_and_flags.SetFlag(ThreadFlag::kCheckpointRequest);
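// Publish the request with a CAS so that a concurrent state/flags change makes the request
// fail (and be reported to the caller) rather than being silently lost.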
1668 bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent(
1669 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1670 if (success) {
1671 // Succeeded setting checkpoint flag, now insert the actual checkpoint.
1672 if (tlsPtr_.checkpoint_function == nullptr) {
1673 tlsPtr_.checkpoint_function = function;
1674 } else {
1675 checkpoint_overflow_.push_back(function);
1676 }
1677 CHECK(ReadFlag(ThreadFlag::kCheckpointRequest));
1678 TriggerSuspend();
1679 }
1680 return success;
1681 }
1682
1683 bool Thread::RequestEmptyCheckpoint() {
1684 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1685 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1686 // If it's not runnable, we don't need to do anything because it won't be in the middle of a
1687 // heap access (e.g. the read barrier).
1688 return false;
1689 }
1690
1691 // We must be runnable to request a checkpoint.
1692 DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
1693 StateAndFlags new_state_and_flags = old_state_and_flags;
1694 new_state_and_flags.SetFlag(ThreadFlag::kEmptyCheckpointRequest);
1695 bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent(
1696 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1697 if (success) {
1698 TriggerSuspend();
1699 }
1700 return success;
1701 }
1702
1703 class BarrierClosure : public Closure {
1704 public:
1705 explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
1706
1707 void Run(Thread* self) override {
1708 wrapped_->Run(self);
1709 barrier_.Pass(self);
1710 }
1711
1712 void Wait(Thread* self, ThreadState suspend_state) {
1713 if (suspend_state != ThreadState::kRunnable) {
1714 barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1);
1715 } else {
1716 barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1);
1717 }
1718 }
1719
1720 private:
1721 Closure* wrapped_;
1722 Barrier barrier_;
1723 };
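// Illustrative usage sketch (mirroring RequestSynchronousCheckpoint below), not a new API:
//   BarrierClosure barrier_closure(function);
//   if (thread->RequestCheckpoint(&barrier_closure)) {
//     barrier_closure.Wait(self, suspend_state);  // Blocks until the target thread ran `function`.
//   }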
1724
1725 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
1726 bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend_state) {
1727 Thread* self = Thread::Current();
1728 if (this == Thread::Current()) {
1729 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1730 // Unlock the thread_list_lock_ before running so that the state is the same regardless of thread.
1731 Locks::thread_list_lock_->ExclusiveUnlock(self);
1732 // Asked to run on this thread. Just run.
1733 function->Run(this);
1734 return true;
1735 }
1736
1737 // The current thread is not this thread.
1738
1739 if (GetState() == ThreadState::kTerminated) {
1740 Locks::thread_list_lock_->ExclusiveUnlock(self);
1741 return false;
1742 }
1743
1744 struct ScopedThreadListLockUnlock {
1745 explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
1746 : self_thread(self_in) {
1747 Locks::thread_list_lock_->AssertHeld(self_thread);
1748 Locks::thread_list_lock_->Unlock(self_thread);
1749 }
1750
1751 ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) {
1752 Locks::thread_list_lock_->AssertNotHeld(self_thread);
1753 Locks::thread_list_lock_->Lock(self_thread);
1754 }
1755
1756 Thread* self_thread;
1757 };
1758
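// Retry loop: if the target thread is runnable we install a checkpoint and wait on a barrier;
// otherwise we pin it with a suspend count, run the closure on its behalf, and then resume it.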
1759 for (;;) {
1760 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1761 // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
1762 // suspend-count lock for too long.
1763 if (GetState() == ThreadState::kRunnable) {
1764 BarrierClosure barrier_closure(function);
1765 bool installed = false;
1766 {
1767 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1768 installed = RequestCheckpoint(&barrier_closure);
1769 }
1770 if (installed) {
1771 // Relinquish the thread-list lock. We should not wait while holding any locks. We cannot
1772 // reacquire it since we don't know whether 'this' has been deleted yet.
1773 Locks::thread_list_lock_->ExclusiveUnlock(self);
1774 ScopedThreadStateChange sts(self, suspend_state);
1775 barrier_closure.Wait(self, suspend_state);
1776 return true;
1777 }
1778 // Fall-through.
1779 }
1780
1781 // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
1782 // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
1783 // certain situations.
1784 {
1785 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1786
1787 if (!ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal)) {
1788 // Just retry the loop.
1789 sched_yield();
1790 continue;
1791 }
1792 }
1793
1794 {
1795 // Release for the wait. The suspension will keep us from being deleted. Reacquire after so
1796 // that we can call ModifySuspendCount without racing against ThreadList::Unregister.
1797 ScopedThreadListLockUnlock stllu(self);
1798 {
1799 ScopedThreadStateChange sts(self, suspend_state);
1800 while (GetState() == ThreadState::kRunnable) {
1801 // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
1802 // moves us to suspended.
1803 sched_yield();
1804 }
1805 }
1806 // Ensure that the flip function for this thread, if pending, is finished *before*
1807 // the checkpoint function is run. Otherwise, we may end up with both 'to' and 'from'
1808 // space references on the stack, confusing the GC's thread-flip logic. The caller is
1809 // runnable, so it can't have a pending flip function.
1810 DCHECK_EQ(self->GetState(), ThreadState::kRunnable);
1811 DCHECK(
1812 !self->GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1813 EnsureFlipFunctionStarted(self);
1814 while (GetStateAndFlags(std::memory_order_acquire).IsAnyOfFlagsSet(FlipFunctionFlags())) {
1815 sched_yield();
1816 }
1817
1818 function->Run(this);
1819 }
1820
1821 {
1822 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1823
1824 DCHECK_NE(GetState(), ThreadState::kRunnable);
1825 bool updated = ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
1826 DCHECK(updated);
1827 }
1828
1829 {
1830 // Imitate ResumeAll: the thread may be waiting on Thread::resume_cond_ since we raised its
1831 // suspend count. Now that the suspend_count_ is lowered, we must do the broadcast.
1832 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1833 Thread::resume_cond_->Broadcast(self);
1834 }
1835
1836 // Release the thread_list_lock_ to be consistent with the barrier-closure path.
1837 Locks::thread_list_lock_->ExclusiveUnlock(self);
1838
1839 return true; // We're done, break out of the loop.
1840 }
1841 }
1842
1843 void Thread::SetFlipFunction(Closure* function) {
1844 // This is called with all threads suspended, except for the calling thread.
1845 DCHECK(IsSuspended() || Thread::Current() == this);
1846 DCHECK(function != nullptr);
1847 DCHECK(tlsPtr_.flip_function == nullptr);
1848 tlsPtr_.flip_function = function;
1849 DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1850 AtomicSetFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_release);
1851 }
1852
1853 void Thread::EnsureFlipFunctionStarted(Thread* self) {
1854 while (true) {
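// Try to atomically replace kPendingFlipFunction with kRunningFlipFunction; whoever wins the
// CAS runs the flip function itself. Retry on spurious CAS failure.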
1855 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1856 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) {
1857 return;
1858 }
1859 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction));
1860 StateAndFlags new_state_and_flags =
1861 old_state_and_flags.WithFlag(ThreadFlag::kRunningFlipFunction)
1862 .WithoutFlag(ThreadFlag::kPendingFlipFunction);
1863 if (tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
1864 new_state_and_flags.GetValue())) {
1865 RunFlipFunction(self, /*notify=*/ true);
1866 DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1867 return;
1868 }
1869 }
1870 }
1871
1872 void Thread::RunFlipFunction(Thread* self, bool notify) {
1873 // This function is called for suspended threads and by the thread running
1874 // `ThreadList::FlipThreadRoots()` after we've successfully set the flag
1875 // `ThreadFlag::kRunningFlipFunction`. This flag is not set if the thread is
1876 // running the flip function right after transitioning to Runnable as
1877 // no other thread may run checkpoints on a thread that's actually Runnable.
1878 DCHECK_EQ(notify, ReadFlag(ThreadFlag::kRunningFlipFunction));
1879
1880 Closure* flip_function = tlsPtr_.flip_function;
1881 tlsPtr_.flip_function = nullptr;
1882 DCHECK(flip_function != nullptr);
1883 flip_function->Run(this);
1884
1885 if (notify) {
1886 // Clear the `ThreadFlag::kRunningFlipFunction` and `ThreadFlag::kWaitingForFlipFunction`.
1887 // Check if the latter was actually set, indicating that there is at least one waiting thread.
1888 constexpr uint32_t kFlagsToClear = enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction) |
1889 enum_cast<uint32_t>(ThreadFlag::kWaitingForFlipFunction);
1890 StateAndFlags old_state_and_flags(
1891 tls32_.state_and_flags.fetch_and(~kFlagsToClear, std::memory_order_release));
1892 if (old_state_and_flags.IsFlagSet(ThreadFlag::kWaitingForFlipFunction)) {
1893 // Notify all threads that are waiting for completion (at least one).
1894 // TODO: Should we create a separate mutex and condition variable instead
1895 // of piggy-backing on the `thread_suspend_count_lock_` and `resume_cond_`?
1896 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1897 resume_cond_->Broadcast(self);
1898 }
1899 }
1900 }
1901
1902 void Thread::WaitForFlipFunction(Thread* self) {
1903 // Another thread is running the flip function. Wait for it to complete.
1904 // Check the flag while holding the mutex so that we do not miss the broadcast.
1905 // Repeat the check after waiting to guard against spurious wakeups (and because
1906 // we share the `thread_suspend_count_lock_` and `resume_cond_` with other code).
1907 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1908 while (true) {
1909 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_acquire);
1910 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
1911 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) {
1912 DCHECK(!old_state_and_flags.IsAnyOfFlagsSet(FlipFunctionFlags()));
1913 break;
1914 }
1915 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kWaitingForFlipFunction)) {
1916 // Mark that there is a waiting thread.
1917 StateAndFlags new_state_and_flags =
1918 old_state_and_flags.WithFlag(ThreadFlag::kWaitingForFlipFunction);
1919 if (!tls32_.state_and_flags.CompareAndSetWeakRelaxed(old_state_and_flags.GetValue(),
1920 new_state_and_flags.GetValue())) {
1921 continue; // Retry.
1922 }
1923 }
1924 resume_cond_->Wait(self);
1925 }
1926 }
1927
1928 void Thread::FullSuspendCheck(bool implicit) {
1929 ScopedTrace trace(__FUNCTION__);
1930 VLOG(threads) << this << " self-suspending";
1931 // Make thread appear suspended to other threads, release mutator_lock_.
1932 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
1933 ScopedThreadSuspension(this, ThreadState::kSuspended); // NOLINT
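// Note: the unnamed temporary is destroyed at the end of this statement, so the transition to
// kSuspended and back happens right here; any pending suspension is honored while reacquiring
// the mutator lock (the NOLINT on that line suppresses the warning about an unnamed temporary).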
1934 if (implicit) {
1935 // For implicit suspend check we want to `madvise()` away
1936 // the alternate signal stack to avoid wasting memory.
1937 MadviseAwayAlternateSignalStack();
1938 }
1939 VLOG(threads) << this << " self-reviving";
1940 }
1941
1942 static std::string GetSchedulerGroupName(pid_t tid) {
1943 // /proc/<pid>/cgroup looks like this:
1944 // 2:devices:/
1945 // 1:cpuacct,cpu:/
1946 // We want the third field from the line whose second field contains the "cpu" token.
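// For example (illustrative), a line such as "3:cpu,cpuacct:/foreground" would make this
// function return "foreground".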
1947 std::string cgroup_file;
1948 if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
1949 &cgroup_file)) {
1950 return "";
1951 }
1952 std::vector<std::string> cgroup_lines;
1953 Split(cgroup_file, '\n', &cgroup_lines);
1954 for (size_t i = 0; i < cgroup_lines.size(); ++i) {
1955 std::vector<std::string> cgroup_fields;
1956 Split(cgroup_lines[i], ':', &cgroup_fields);
1957 std::vector<std::string> cgroups;
1958 Split(cgroup_fields[1], ',', &cgroups);
1959 for (size_t j = 0; j < cgroups.size(); ++j) {
1960 if (cgroups[j] == "cpu") {
1961 return cgroup_fields[2].substr(1); // Skip the leading slash.
1962 }
1963 }
1964 }
1965 return "";
1966 }
1967
1968 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
1969 std::string group_name;
1970 int priority;
1971 bool is_daemon = false;
1972 Thread* self = Thread::Current();
1973
1974 // Don't do this if we are aborting since the GC may have all the threads suspended. This will
1975 // cause ScopedObjectAccessUnchecked to deadlock.
1976 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
1977 ScopedObjectAccessUnchecked soa(self);
1978 priority = WellKnownClasses::java_lang_Thread_priority->GetInt(thread->tlsPtr_.opeer);
1979 is_daemon = WellKnownClasses::java_lang_Thread_daemon->GetBoolean(thread->tlsPtr_.opeer);
1980
1981 ObjPtr<mirror::Object> thread_group =
1982 WellKnownClasses::java_lang_Thread_group->GetObject(thread->tlsPtr_.opeer);
1983
1984 if (thread_group != nullptr) {
1985 ObjPtr<mirror::Object> group_name_object =
1986 WellKnownClasses::java_lang_ThreadGroup_name->GetObject(thread_group);
1987 group_name = (group_name_object != nullptr)
1988 ? group_name_object->AsString()->ToModifiedUtf8()
1989 : "<null>";
1990 }
1991 } else if (thread != nullptr) {
1992 priority = thread->GetNativePriority();
1993 } else {
1994 palette_status_t status = PaletteSchedGetPriority(tid, &priority);
1995 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
1996 }
1997
1998 std::string scheduler_group_name(GetSchedulerGroupName(tid));
1999 if (scheduler_group_name.empty()) {
2000 scheduler_group_name = "default";
2001 }
2002
2003 if (thread != nullptr) {
2004 thread->tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
2005 os << '"' << thread->tlsPtr_.name.load() << '"';
2006 thread->tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
2007 if (is_daemon) {
2008 os << " daemon";
2009 }
2010 os << " prio=" << priority
2011 << " tid=" << thread->GetThreadId()
2012 << " " << thread->GetState();
2013 if (thread->IsStillStarting()) {
2014 os << " (still starting up)";
2015 }
2016 if (thread->tls32_.disable_thread_flip_count != 0) {
2017 os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
2018 }
2019 os << "\n";
2020 } else {
2021 os << '"' << ::art::GetThreadName(tid) << '"'
2022 << " prio=" << priority
2023 << " (not attached)\n";
2024 }
2025
2026 if (thread != nullptr) {
2027 auto suspend_log_fn = [&]() REQUIRES(Locks::thread_suspend_count_lock_) {
2028 StateAndFlags state_and_flags = thread->GetStateAndFlags(std::memory_order_relaxed);
2029 static_assert(
2030 static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
2031 state_and_flags.SetState(ThreadState::kRunnable); // Clear state bits.
2032 os << " | group=\"" << group_name << "\""
2033 << " sCount=" << thread->tls32_.suspend_count
2034 << " ucsCount=" << thread->tls32_.user_code_suspend_count
2035 << " flags=" << state_and_flags.GetValue()
2036 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
2037 << " self=" << reinterpret_cast<const void*>(thread) << "\n";
2038 };
2039 if (Locks::thread_suspend_count_lock_->IsExclusiveHeld(self)) {
2040 Locks::thread_suspend_count_lock_->AssertExclusiveHeld(self); // For annotalysis.
2041 suspend_log_fn();
2042 } else {
2043 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
2044 suspend_log_fn();
2045 }
2046 }
2047
2048 os << " | sysTid=" << tid
2049 << " nice=" << getpriority(PRIO_PROCESS, static_cast<id_t>(tid))
2050 << " cgrp=" << scheduler_group_name;
2051 if (thread != nullptr) {
2052 int policy;
2053 sched_param sp;
2054 #if !defined(__APPLE__)
2055 // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
2056 policy = sched_getscheduler(tid);
2057 if (policy == -1) {
2058 PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
2059 }
2060 int sched_getparam_result = sched_getparam(tid, &sp);
2061 if (sched_getparam_result == -1) {
2062 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
2063 sp.sched_priority = -1;
2064 }
2065 #else
2066 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
2067 __FUNCTION__);
2068 #endif
2069 os << " sched=" << policy << "/" << sp.sched_priority
2070 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
2071 }
2072 os << "\n";
2073
2074 // Grab the scheduler stats for this thread.
2075 std::string scheduler_stats;
2076 if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid),
2077 &scheduler_stats)
2078 && !scheduler_stats.empty()) {
2079 scheduler_stats = android::base::Trim(scheduler_stats); // Lose the trailing '\n'.
2080 } else {
2081 scheduler_stats = "0 0 0";
2082 }
2083
2084 char native_thread_state = '?';
2085 int utime = 0;
2086 int stime = 0;
2087 int task_cpu = 0;
2088 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
2089
2090 os << " | state=" << native_thread_state
2091 << " schedstat=( " << scheduler_stats << " )"
2092 << " utm=" << utime
2093 << " stm=" << stime
2094 << " core=" << task_cpu
2095 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
2096 if (thread != nullptr) {
2097 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
2098 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
2099 << PrettySize(thread->tlsPtr_.stack_size) << "\n";
2100 // Dump the held mutexes.
2101 os << " | held mutexes=";
2102 for (size_t i = 0; i < kLockLevelCount; ++i) {
2103 if (i != kMonitorLock) {
2104 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
2105 if (mutex != nullptr) {
2106 os << " \"" << mutex->GetName() << "\"";
2107 if (mutex->IsReaderWriterMutex()) {
2108 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
2109 if (rw_mutex->GetExclusiveOwnerTid() == tid) {
2110 os << "(exclusive held)";
2111 } else {
2112 os << "(shared held)";
2113 }
2114 }
2115 }
2116 }
2117 }
2118 os << "\n";
2119 }
2120 }
2121
2122 void Thread::DumpState(std::ostream& os) const {
2123 Thread::DumpState(os, this, GetTid());
2124 }
2125
2126 struct StackDumpVisitor : public MonitorObjectsStackVisitor {
2127 StackDumpVisitor(std::ostream& os_in,
2128 Thread* thread_in,
2129 Context* context,
2130 bool can_allocate,
2131 bool check_suspended = true,
2132 bool dump_locks = true)
2133 REQUIRES_SHARED(Locks::mutator_lock_)
2134 : MonitorObjectsStackVisitor(thread_in,
2135 context,
2136 check_suspended,
2137 can_allocate && dump_locks),
2138 os(os_in),
2139 last_method(nullptr),
2140 last_line_number(0),
2141 repetition_count(0) {}
2142
2143 virtual ~StackDumpVisitor() {
2144 if (frame_count == 0) {
2145 os << " (no managed stack frames)\n";
2146 }
2147 }
2148
2149 static constexpr size_t kMaxRepetition = 3u;
2150
2151 VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
2152 override
2153 REQUIRES_SHARED(Locks::mutator_lock_) {
2154 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
2155 ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache();
2156 int line_number = -1;
2157 uint32_t dex_pc = GetDexPc(false);
2158 if (dex_cache != nullptr) { // be tolerant of bad input
2159 const DexFile* dex_file = dex_cache->GetDexFile();
2160 line_number = annotations::GetLineNumFromPC(dex_file, m, dex_pc);
2161 }
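// Collapse long runs of identical (method, line number) frames: after kMaxRepetition repeats
// we stop printing and later emit a single "... repeated N times" line.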
2162 if (line_number == last_line_number && last_method == m) {
2163 ++repetition_count;
2164 } else {
2165 if (repetition_count >= kMaxRepetition) {
2166 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
2167 }
2168 repetition_count = 0;
2169 last_line_number = line_number;
2170 last_method = m;
2171 }
2172
2173 if (repetition_count >= kMaxRepetition) {
2174 // Skip visiting (i.e. printing) anything.
2175 return VisitMethodResult::kSkipMethod;
2176 }
2177
2178 os << " at " << m->PrettyMethod(false);
2179 if (m->IsNative()) {
2180 os << "(Native method)";
2181 } else {
2182 const char* source_file(m->GetDeclaringClassSourceFile());
2183 if (line_number == -1) {
2184 // If we failed to map to a line number, use
2185 // the dex pc as the line number and leave the source file null.
2186 source_file = nullptr;
2187 line_number = static_cast<int32_t>(dex_pc);
2188 }
2189 os << "(" << (source_file != nullptr ? source_file : "unavailable")
2190 << ":" << line_number << ")";
2191 }
2192 os << "\n";
2193 // Go and visit locks.
2194 return VisitMethodResult::kContinueMethod;
2195 }
2196
2197 VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
2198 return VisitMethodResult::kContinueMethod;
2199 }
2200
2201 void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED)
2202 override
2203 REQUIRES_SHARED(Locks::mutator_lock_) {
2204 PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
2205 }
2206 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
2207 override
2208 REQUIRES_SHARED(Locks::mutator_lock_) {
2209 PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
2210 }
2211 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
2212 ThreadState state,
2213 uint32_t owner_tid)
2214 override
2215 REQUIRES_SHARED(Locks::mutator_lock_) {
2216 const char* msg;
2217 switch (state) {
2218 case ThreadState::kBlocked:
2219 msg = " - waiting to lock ";
2220 break;
2221
2222 case ThreadState::kWaitingForLockInflation:
2223 msg = " - waiting for lock inflation of ";
2224 break;
2225
2226 default:
2227 LOG(FATAL) << "Unreachable";
2228 UNREACHABLE();
2229 }
2230 PrintObject(obj, msg, owner_tid);
2231 num_blocked++;
2232 }
2233 void VisitLockedObject(ObjPtr<mirror::Object> obj)
2234 override
2235 REQUIRES_SHARED(Locks::mutator_lock_) {
2236 PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
2237 num_locked++;
2238 }
2239
2240 void PrintObject(ObjPtr<mirror::Object> obj,
2241 const char* msg,
2242 uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) {
2243 if (obj == nullptr) {
2244 os << msg << "an unknown object";
2245 } else {
2246 if ((obj->GetLockWord(true).GetState() == LockWord::kThinLocked) &&
2247 Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
2248 // Getting the identity hashcode here would result in lock inflation and suspension of the
2249 // current thread, which isn't safe if this is the only runnable thread.
2250 os << msg << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
2251 reinterpret_cast<intptr_t>(obj.Ptr()),
2252 obj->PrettyTypeOf().c_str());
2253 } else {
2254 // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
2255 // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread
2256 // suspension and move pretty_object.
2257 const std::string pretty_type(obj->PrettyTypeOf());
2258 os << msg << StringPrintf("<0x%08x> (a %s)", obj->IdentityHashCode(), pretty_type.c_str());
2259 }
2260 }
2261 if (owner_tid != ThreadList::kInvalidThreadId) {
2262 os << " held by thread " << owner_tid;
2263 }
2264 os << "\n";
2265 }
2266
2267 std::ostream& os;
2268 ArtMethod* last_method;
2269 int last_line_number;
2270 size_t repetition_count;
2271 size_t num_blocked = 0;
2272 size_t num_locked = 0;
2273 };
2274
2275 static bool ShouldShowNativeStack(const Thread* thread)
2276 REQUIRES_SHARED(Locks::mutator_lock_) {
2277 ThreadState state = thread->GetState();
2278
2279 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
2280 if (state > ThreadState::kWaiting && state < ThreadState::kStarting) {
2281 return true;
2282 }
2283
2284 // In an Object.wait variant or Thread.sleep? That's not interesting.
2285 if (state == ThreadState::kTimedWaiting ||
2286 state == ThreadState::kSleeping ||
2287 state == ThreadState::kWaiting) {
2288 return false;
2289 }
2290
2291 // Threads with no managed stack frames should be shown.
2292 if (!thread->HasManagedStack()) {
2293 return true;
2294 }
2295
2296 // In some other native method? That's interesting.
2297 // We don't just check kNative because native methods will be in state kSuspended if they're
2298 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
2299 // thread-startup states if it's early enough in their life cycle (http://b/7432159).
2300 ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
2301 return current_method != nullptr && current_method->IsNative();
2302 }
2303
2304 Thread::DumpOrder Thread::DumpJavaStack(std::ostream& os,
2305 bool check_suspended,
2306 bool dump_locks) const {
2307 // Dumping the Java stack involves the verifier for locks. The verifier operates under the
2308 // assumption that there is no exception pending on entry. Thus, stash any pending exception.
2309 // Use Thread::Current() instead of this in case a thread is dumping the stack of another
2310 // suspended thread.
2311 ScopedExceptionStorage ses(Thread::Current());
2312
2313 std::unique_ptr<Context> context(Context::Create());
2314 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
2315 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
2316 dumper.WalkStack();
2317 if (IsJitSensitiveThread()) {
2318 return DumpOrder::kMain;
2319 } else if (dumper.num_blocked > 0) {
2320 return DumpOrder::kBlocked;
2321 } else if (dumper.num_locked > 0) {
2322 return DumpOrder::kLocked;
2323 } else {
2324 return DumpOrder::kDefault;
2325 }
2326 }
2327
2328 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2329 bool dump_native_stack,
2330 bool force_dump_stack) const {
2331 unwindstack::AndroidLocalUnwinder unwinder;
2332 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
2333 }
2334
2335 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2336 unwindstack::AndroidLocalUnwinder& unwinder,
2337 bool dump_native_stack,
2338 bool force_dump_stack) const {
2339 // TODO: we call this code when dying but may not have suspended the thread ourselves. The
2340 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit
2341 // the race with the thread_suspend_count_lock_).
2342 bool dump_for_abort = (gAborting > 0);
2343 bool safe_to_dump = (this == Thread::Current() || IsSuspended());
2344 if (!kIsDebugBuild) {
2345 // We always want to dump the stack for an abort; however, there is no point dumping another
2346 // thread's stack in debug builds, where we'll hit the not-suspended check in the stack walk.
2347 safe_to_dump = (safe_to_dump || dump_for_abort);
2348 }
2349 DumpOrder dump_order = DumpOrder::kDefault;
2350 if (safe_to_dump || force_dump_stack) {
2351 // If we're currently in native code, dump that stack before dumping the managed stack.
2352 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
2353 ArtMethod* method =
2354 GetCurrentMethod(nullptr,
2355 /*check_suspended=*/ !force_dump_stack,
2356 /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
2357 DumpNativeStack(os, unwinder, GetTid(), " native: ", method);
2358 }
2359 dump_order = DumpJavaStack(os,
2360 /*check_suspended=*/ !force_dump_stack,
2361 /*dump_locks=*/ !force_dump_stack);
2362 } else {
2363 os << "Not able to dump stack of thread that isn't suspended";
2364 }
2365 return dump_order;
2366 }
2367
2368 void Thread::ThreadExitCallback(void* arg) {
2369 Thread* self = reinterpret_cast<Thread*>(arg);
2370 if (self->tls32_.thread_exit_check_count == 0) {
2371 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
2372 "going to use a pthread_key_create destructor?): " << *self;
2373 CHECK(is_started_);
2374 #ifdef __BIONIC__
2375 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
2376 #else
2377 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
2378 Thread::self_tls_ = self;
2379 #endif
2380 self->tls32_.thread_exit_check_count = 1;
2381 } else {
2382 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
2383 }
2384 }
2385
2386 void Thread::Startup() {
2387 CHECK(!is_started_);
2388 is_started_ = true;
2389 {
2390 // MutexLock to keep annotalysis happy.
2391 //
2392 // Note we use null for the thread because Thread::Current can
2393 // return garbage since (is_started_ == true) and
2394 // Thread::pthread_key_self_ is not yet initialized.
2395 // This was seen on glibc.
2396 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
2397 resume_cond_ = new ConditionVariable("Thread resumption condition variable",
2398 *Locks::thread_suspend_count_lock_);
2399 }
2400
2401 // Allocate a TLS slot.
2402 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
2403 "self key");
2404
2405 // Double-check the TLS slot allocation.
2406 if (pthread_getspecific(pthread_key_self_) != nullptr) {
2407 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
2408 }
2409 #ifndef __BIONIC__
2410 CHECK(Thread::self_tls_ == nullptr);
2411 #endif
2412 }
2413
2414 void Thread::FinishStartup() {
2415 Runtime* runtime = Runtime::Current();
2416 CHECK(runtime->IsStarted());
2417
2418 // Finish attaching the main thread.
2419 ScopedObjectAccess soa(Thread::Current());
2420 soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup());
2421 soa.Self()->AssertNoPendingException();
2422
2423 runtime->RunRootClinits(soa.Self());
2424
2425 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
2426 // threads, this is done in Thread.start() on the Java side.
2427 soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
2428 soa.Self()->AssertNoPendingException();
2429 }
2430
2431 void Thread::Shutdown() {
2432 CHECK(is_started_);
2433 is_started_ = false;
2434 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
2435 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
2436 if (resume_cond_ != nullptr) {
2437 delete resume_cond_;
2438 resume_cond_ = nullptr;
2439 }
2440 }
2441
2442 void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) {
2443 ObjPtr<mirror::Object> thread_object = soa.Self()->GetPeer();
2444 ObjPtr<mirror::Object> thread_group_object = soa.Decode<mirror::Object>(thread_group);
2445 if (thread_group == nullptr || kIsDebugBuild) {
2446 // There is always a group set. Retrieve it.
2447 thread_group_object = WellKnownClasses::java_lang_Thread_group->GetObject(thread_object);
2448 if (kIsDebugBuild && thread_group != nullptr) {
2449 CHECK(thread_group_object == soa.Decode<mirror::Object>(thread_group));
2450 }
2451 }
2452 WellKnownClasses::java_lang_ThreadGroup_add->InvokeVirtual<'V', 'L'>(
2453 soa.Self(), thread_group_object, thread_object);
2454 }
2455
2456 Thread::Thread(bool daemon)
2457 : tls32_(daemon),
2458 wait_monitor_(nullptr),
2459 is_runtime_thread_(false) {
2460 wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
2461 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
2462 tlsPtr_.mutator_lock = Locks::mutator_lock_;
2463 DCHECK(tlsPtr_.mutator_lock != nullptr);
2464 tlsPtr_.name.store(kThreadNameDuringStartup, std::memory_order_relaxed);
2465
2466 static_assert((sizeof(Thread) % 4) == 0U,
2467 "art::Thread has a size which is not a multiple of 4.");
2468 DCHECK_EQ(GetStateAndFlags(std::memory_order_relaxed).GetValue(), 0u);
2469 StateAndFlags state_and_flags = StateAndFlags(0u).WithState(ThreadState::kNative);
2470 tls32_.state_and_flags.store(state_and_flags.GetValue(), std::memory_order_relaxed);
2471 tls32_.interrupted.store(false, std::memory_order_relaxed);
2472 // Initialize with no permit; if the java Thread was unparked before being
2473 // started, it will unpark itself before calling into java code.
2474 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
2475 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
2476 std::fill(tlsPtr_.rosalloc_runs,
2477 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
2478 gc::allocator::RosAlloc::GetDedicatedFullRun());
2479 tlsPtr_.checkpoint_function = nullptr;
2480 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
2481 tlsPtr_.active_suspend_barriers[i] = nullptr;
2482 }
2483 tlsPtr_.flip_function = nullptr;
2484 tlsPtr_.thread_local_mark_stack = nullptr;
2485 tls32_.is_transitioning_to_runnable = false;
2486 ResetTlab();
2487 }
2488
2489 bool Thread::CanLoadClasses() const {
2490 return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
2491 }
2492
2493 bool Thread::IsStillStarting() const {
2494 // You might think you can check whether the state is kStarting, but for much of thread startup,
2495 // the thread is in kNative; it might also be in kVmWait.
2496 // You might think you can check whether the peer is null, but the peer is actually created and
2497 // assigned fairly early on, and needs to be.
2498 // It turns out that the last thing to change is the thread name; that's a good proxy for "has
2499 // this thread _ever_ entered kRunnable".
2500 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
2501 (tlsPtr_.name.load() == kThreadNameDuringStartup);
2502 }
2503
2504 void Thread::AssertPendingException() const {
2505 CHECK(IsExceptionPending()) << "Pending exception expected.";
2506 }
2507
2508 void Thread::AssertPendingOOMException() const {
2509 AssertPendingException();
2510 auto* e = GetException();
2511 CHECK_EQ(e->GetClass(), WellKnownClasses::java_lang_OutOfMemoryError.Get()) << e->Dump();
2512 }
2513
2514 void Thread::AssertNoPendingException() const {
2515 if (UNLIKELY(IsExceptionPending())) {
2516 ScopedObjectAccess soa(Thread::Current());
2517 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump();
2518 }
2519 }
2520
2521 void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
2522 if (UNLIKELY(IsExceptionPending())) {
2523 ScopedObjectAccess soa(Thread::Current());
2524 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
2525 << GetException()->Dump();
2526 }
2527 }
2528
2529 class MonitorExitVisitor : public SingleRootVisitor {
2530 public:
2531 explicit MonitorExitVisitor(Thread* self) : self_(self) { }
2532
2533 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
2534 void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
2535 override NO_THREAD_SAFETY_ANALYSIS {
2536 if (self_->HoldsLock(entered_monitor)) {
2537 LOG(WARNING) << "Calling MonitorExit on object "
2538 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
2539 << " left locked by native thread "
2540 << *Thread::Current() << " which is detaching";
2541 entered_monitor->MonitorExit(self_);
2542 }
2543 }
2544
2545 private:
2546 Thread* const self_;
2547 };
2548
2549 void Thread::Destroy(bool should_run_callbacks) {
2550 Thread* self = this;
2551 DCHECK_EQ(self, Thread::Current());
2552
2553 if (tlsPtr_.jni_env != nullptr) {
2554 {
2555 ScopedObjectAccess soa(self);
2556 MonitorExitVisitor visitor(self);
2557 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
2558 tlsPtr_.jni_env->monitors_.VisitRoots(&visitor, RootInfo(kRootVMInternal));
2559 }
2560 // Release locally held global references; releasing them may require the mutator lock.
2561 if (tlsPtr_.jpeer != nullptr) {
2562 // If pthread_create fails we don't have a jni env here.
2563 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
2564 tlsPtr_.jpeer = nullptr;
2565 }
2566 if (tlsPtr_.class_loader_override != nullptr) {
2567 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
2568 tlsPtr_.class_loader_override = nullptr;
2569 }
2570 }
2571
2572 if (tlsPtr_.opeer != nullptr) {
2573 ScopedObjectAccess soa(self);
2574 if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
2575 Trace::FlushThreadBuffer(self);
2576 self->ResetMethodTraceBuffer();
2577 }
2578 // We may need to call user-supplied managed code; do this before final clean-up.
2579 HandleUncaughtExceptions();
2580 RemoveFromThreadGroup();
2581 Runtime* runtime = Runtime::Current();
2582 if (runtime != nullptr && should_run_callbacks) {
2583 runtime->GetRuntimeCallbacks()->ThreadDeath(self);
2584 }
2585
2586 // this.nativePeer = 0;
2587 SetNativePeer</*kSupportTransaction=*/ true>(tlsPtr_.opeer, nullptr);
2588
2589 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
2590 // who is waiting.
2591 ObjPtr<mirror::Object> lock =
2592 WellKnownClasses::java_lang_Thread_lock->GetObject(tlsPtr_.opeer);
2593 // (This conditional is only needed for tests, where Thread.lock won't have been set.)
2594 if (lock != nullptr) {
2595 StackHandleScope<1> hs(self);
2596 Handle<mirror::Object> h_obj(hs.NewHandle(lock));
2597 ObjectLock<mirror::Object> locker(self, h_obj);
2598 locker.NotifyAll();
2599 }
2600 tlsPtr_.opeer = nullptr;
2601 }
2602
2603 {
2604 ScopedObjectAccess soa(self);
2605 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
2606 }
2607 // Mark-stack revocation must be performed at the very end. No
2608 // checkpoint/flip-function or read-barrier should be called after this.
2609 if (gUseReadBarrier) {
2610 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
2611 }
2612 }
2613
2614 Thread::~Thread() {
2615 CHECK(tlsPtr_.class_loader_override == nullptr);
2616 CHECK(tlsPtr_.jpeer == nullptr);
2617 CHECK(tlsPtr_.opeer == nullptr);
2618 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
2619 if (initialized) {
2620 delete tlsPtr_.jni_env;
2621 tlsPtr_.jni_env = nullptr;
2622 }
2623 CHECK_NE(GetState(), ThreadState::kRunnable);
2624 CHECK(!ReadFlag(ThreadFlag::kCheckpointRequest));
2625 CHECK(!ReadFlag(ThreadFlag::kEmptyCheckpointRequest));
2626 CHECK(tlsPtr_.checkpoint_function == nullptr);
2627 CHECK_EQ(checkpoint_overflow_.size(), 0u);
2628 CHECK(tlsPtr_.flip_function == nullptr);
2629 CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
2630
2631 // Make sure we processed all deoptimization requests.
2632 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
2633 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
2634 "Not all deoptimized frames have been consumed by the debugger.";
2635
2636 // We may be deleting a stillborn thread.
2637 SetStateUnsafe(ThreadState::kTerminated);
2638
2639 delete wait_cond_;
2640 delete wait_mutex_;
2641
2642 if (tlsPtr_.long_jump_context != nullptr) {
2643 delete tlsPtr_.long_jump_context;
2644 }
2645
2646 if (initialized) {
2647 CleanupCpu();
2648 }
2649
2650 SetCachedThreadName(nullptr); // Deallocate name.
2651 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
2652
2653 if (tlsPtr_.method_trace_buffer != nullptr) {
2654 delete[] tlsPtr_.method_trace_buffer;
2655 }
2656
2657 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
2658
2659 TearDownAlternateSignalStack();
2660 }
2661
2662 void Thread::HandleUncaughtExceptions() {
2663 Thread* self = this;
2664 DCHECK_EQ(self, Thread::Current());
2665 if (!self->IsExceptionPending()) {
2666 return;
2667 }
2668
2669 // Get and clear the exception.
2670 ObjPtr<mirror::Object> exception = self->GetException();
2671 self->ClearException();
2672
2673 // Call the Thread instance's dispatchUncaughtException(Throwable)
2674 WellKnownClasses::java_lang_Thread_dispatchUncaughtException->InvokeFinal<'V', 'L'>(
2675 self, tlsPtr_.opeer, exception);
2676
2677 // If the dispatchUncaughtException threw, clear that exception too.
2678 self->ClearException();
2679 }
2680
2681 void Thread::RemoveFromThreadGroup() {
2682 Thread* self = this;
2683 DCHECK_EQ(self, Thread::Current());
2684 // this.group.threadTerminated(this);
2685 // group can be null if we're in the compiler or a test.
2686 ObjPtr<mirror::Object> group =
2687 WellKnownClasses::java_lang_Thread_group->GetObject(tlsPtr_.opeer);
2688 if (group != nullptr) {
2689 WellKnownClasses::java_lang_ThreadGroup_threadTerminated->InvokeVirtual<'V', 'L'>(
2690 self, group, tlsPtr_.opeer);
2691 }
2692 }
2693
2694 template <bool kPointsToStack>
2695 class JniTransitionReferenceVisitor : public StackVisitor {
2696 public:
2697 JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
2698 : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2699 obj_(obj),
2700 found_(false) {}
2701
2702 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2703 ArtMethod* m = GetMethod();
2704 if (!m->IsNative() || m->IsCriticalNative()) {
2705 return true;
2706 }
2707 if (kPointsToStack) {
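// Walk the argument slots laid out just above this frame (past the ArtMethod* slot): the
// implicit 'this' reference for non-static methods first, then the shorty entries, where 'L'
// marks a reference and 'J'/'D' occupy two slots. Check whether `obj_` points at one of them.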
2708 uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
2709 size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
2710 uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
2711 if (!m->IsStatic()) {
2712 if (current_vreg == obj_) {
2713 found_ = true;
2714 return false;
2715 }
2716 current_vreg += 1u;
2717 }
2718 uint32_t shorty_length;
2719 const char* shorty = m->GetShorty(&shorty_length);
2720 for (size_t i = 1; i != shorty_length; ++i) {
2721 switch (shorty[i]) {
2722 case 'D':
2723 case 'J':
2724 current_vreg += 2u;
2725 break;
2726 case 'L':
2727 if (current_vreg == obj_) {
2728 found_ = true;
2729 return false;
2730 }
2731 FALLTHROUGH_INTENDED;
2732 default:
2733 current_vreg += 1u;
2734 break;
2735 }
2736 }
2737 // Continue only if the object is somewhere higher on the stack.
2738 return obj_ >= current_vreg;
2739 } else { // if (kPointsToStack)
2740 if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
2741 found_ = true;
2742 return false;
2743 }
2744 return true;
2745 }
2746 }
2747
2748 bool Found() const {
2749 return found_;
2750 }
2751
2752 private:
2753 void* obj_;
2754 bool found_;
2755 };
2756
2757 bool Thread::IsJniTransitionReference(jobject obj) const {
2758 DCHECK(obj != nullptr);
2759 // We need a non-const pointer for stack walk even if we're not modifying the thread state.
2760 Thread* thread = const_cast<Thread*>(this);
2761 uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
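// A JNI transition reference either points into this thread's stack (a spilled argument slot)
// or, for a static native method, at the method's declaring-class slot; the range check below
// selects the matching stack walk.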
2762 if (static_cast<size_t>(raw_obj - tlsPtr_.stack_begin) < tlsPtr_.stack_size) {
2763 JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
2764 visitor.WalkStack();
2765 return visitor.Found();
2766 } else {
2767 JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
2768 visitor.WalkStack();
2769 return visitor.Found();
2770 }
2771 }
2772
2773 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
2774 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
2775 visitor, RootInfo(kRootNativeStack, thread_id));
2776 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
2777 cur->VisitRoots(buffered_visitor);
2778 }
2779 }
2780
2781 ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
2782 DCHECK(obj != nullptr);
2783 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2784 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2785 DCHECK_NE(kind, kJniTransition);
2786 DCHECK_NE(kind, kLocal);
2787 ObjPtr<mirror::Object> result;
2788 bool expect_null = false;
2789 if (kind == kGlobal) {
2790 result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
2791 } else {
2792 DCHECK_EQ(kind, kWeakGlobal);
2793 result = tlsPtr_.jni_env->vm_->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
2794 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
2795 // This is a special case where it's okay to return null.
2796 expect_null = true;
2797 result = nullptr;
2798 }
2799 }
2800
2801 DCHECK(expect_null || result != nullptr)
2802 << "use of deleted " << ToStr<IndirectRefKind>(kind).c_str()
2803 << " " << static_cast<const void*>(obj);
2804 return result;
2805 }
2806
2807 bool Thread::IsJWeakCleared(jweak obj) const {
2808 CHECK(obj != nullptr);
2809 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2810 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2811 CHECK_EQ(kind, kWeakGlobal);
2812 return tlsPtr_.jni_env->vm_->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
2813 }
2814
2815 // Implements java.lang.Thread.interrupted.
2816 bool Thread::Interrupted() {
2817 DCHECK_EQ(Thread::Current(), this);
2818 // No other thread can concurrently reset the interrupted flag.
2819 bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst);
2820 if (interrupted) {
2821 tls32_.interrupted.store(false, std::memory_order_seq_cst);
2822 }
2823 return interrupted;
2824 }
2825
2826 // Implements java.lang.Thread.isInterrupted.
2827 bool Thread::IsInterrupted() {
2828 return tls32_.interrupted.load(std::memory_order_seq_cst);
2829 }
2830
2831 void Thread::Interrupt(Thread* self) {
2832 {
2833 MutexLock mu(self, *wait_mutex_);
2834 if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
2835 return;
2836 }
2837 tls32_.interrupted.store(true, std::memory_order_seq_cst);
2838 NotifyLocked(self);
2839 }
2840 Unpark();
2841 }
2842
2843 void Thread::Notify() {
2844 Thread* self = Thread::Current();
2845 MutexLock mu(self, *wait_mutex_);
2846 NotifyLocked(self);
2847 }
2848
2849 void Thread::NotifyLocked(Thread* self) {
2850 if (wait_monitor_ != nullptr) {
2851 wait_cond_->Signal(self);
2852 }
2853 }
2854
2855 void Thread::SetClassLoaderOverride(jobject class_loader_override) {
2856 if (tlsPtr_.class_loader_override != nullptr) {
2857 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
2858 }
2859 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
2860 }
2861
2862 using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;
2863
2864 // Counts the stack trace depth and also fetches the first max_saved_frames frames.
2865 class FetchStackTraceVisitor : public StackVisitor {
2866 public:
2867 explicit FetchStackTraceVisitor(Thread* thread,
2868 ArtMethodDexPcPair* saved_frames = nullptr,
2869 size_t max_saved_frames = 0)
2870 REQUIRES_SHARED(Locks::mutator_lock_)
2871 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2872 saved_frames_(saved_frames),
2873 max_saved_frames_(max_saved_frames) {}
2874
2875 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2876 // We want to skip frames up to and including the exception's constructor.
2877 // Note we also skip the frame if it doesn't have a method (namely the callee
2878 // save frame).
2879 ArtMethod* m = GetMethod();
2880 if (skipping_ && !m->IsRuntimeMethod() &&
2881 !GetClassRoot<mirror::Throwable>()->IsAssignableFrom(m->GetDeclaringClass())) {
2882 skipping_ = false;
2883 }
2884 if (!skipping_) {
2885 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save).
2886 if (depth_ < max_saved_frames_) {
2887 saved_frames_[depth_].first = m;
2888 saved_frames_[depth_].second = m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc();
2889 }
2890 ++depth_;
2891 }
2892 } else {
2893 ++skip_depth_;
2894 }
2895 return true;
2896 }
2897
2898 uint32_t GetDepth() const {
2899 return depth_;
2900 }
2901
2902 uint32_t GetSkipDepth() const {
2903 return skip_depth_;
2904 }
2905
2906 private:
2907 uint32_t depth_ = 0;
2908 uint32_t skip_depth_ = 0;
2909 bool skipping_ = true;
2910 ArtMethodDexPcPair* saved_frames_;
2911 const size_t max_saved_frames_;
2912
2913 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
2914 };
2915
2916 class BuildInternalStackTraceVisitor : public StackVisitor {
2917 public:
2918 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, uint32_t skip_depth)
2919 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2920 self_(self),
2921 skip_depth_(skip_depth),
2922 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
2923
2924 bool Init(uint32_t depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
2925 // Allocate method trace as an object array where the first element is a pointer array that
2926 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
2927 // class of the ArtMethod pointers.
2928 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
2929 StackHandleScope<1> hs(self_);
2930 ObjPtr<mirror::Class> array_class =
2931 GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker);
2932 // The first element is the methods and dex pc array, the other elements are declaring classes
2933 // for the methods to ensure classes in the stack trace don't get unloaded.
2934 Handle<mirror::ObjectArray<mirror::Object>> trace(
2935 hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
2936 hs.Self(), array_class, static_cast<int32_t>(depth) + 1)));
2937 if (trace == nullptr) {
2938 // Acquire uninterruptible_ in all paths.
2939 self_->StartAssertNoThreadSuspension("Building internal stack trace");
2940 self_->AssertPendingOOMException();
2941 return false;
2942 }
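// The pointer array stores the `depth` ArtMethod* entries in its first half and the
// corresponding dex PCs in its second half, hence the `depth * 2` length (see AddFrame()).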
2943 ObjPtr<mirror::PointerArray> methods_and_pcs =
2944 class_linker->AllocPointerArray(self_, depth * 2);
2945 const char* last_no_suspend_cause =
2946 self_->StartAssertNoThreadSuspension("Building internal stack trace");
2947 if (methods_and_pcs == nullptr) {
2948 self_->AssertPendingOOMException();
2949 return false;
2950 }
2951 trace->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(0, methods_and_pcs);
2952 trace_ = trace.Get();
2953 // If we are called from native, use non-transactional mode.
2954 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
2955 return true;
2956 }
2957
2958 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
2959 self_->EndAssertNoThreadSuspension(nullptr);
2960 }
2961
2962 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2963 if (trace_ == nullptr) {
2964 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
2965 }
2966 if (skip_depth_ > 0) {
2967 skip_depth_--;
2968 return true;
2969 }
2970 ArtMethod* m = GetMethod();
2971 if (m->IsRuntimeMethod()) {
2972 return true; // Ignore runtime frames (in particular callee save).
2973 }
2974 AddFrame(m, m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc());
2975 return true;
2976 }
2977
2978 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
2979 ObjPtr<mirror::PointerArray> methods_and_pcs = GetTraceMethodsAndPCs();
2980 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
2981 count_, method, pointer_size_);
2982 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
2983 static_cast<uint32_t>(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_);
2984 // Save the declaring class of the method to ensure that the declaring classes of the methods
2985 // do not get unloaded while the stack trace is live. However, this does not work for copied
2986 // methods because the declaring class of a copied method points to an interface class which
2987 // may be in a different class loader. Instead, retrieve the class loader associated with the
2988 // allocator that holds the copied method. This is much cheaper than finding the actual class.
2989 ObjPtr<mirror::Object> keep_alive;
2990 if (UNLIKELY(method->IsCopied())) {
2991 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
2992 keep_alive = class_linker->GetHoldingClassLoaderOfCopiedMethod(self_, method);
2993 } else {
2994 keep_alive = method->GetDeclaringClass();
2995 }
2996 trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
2997 static_cast<int32_t>(count_) + 1, keep_alive);
2998 ++count_;
2999 }
3000
3001 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
3002 return ObjPtr<mirror::PointerArray>::DownCast(trace_->Get(0));
3003 }
3004
3005 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
3006 return trace_;
3007 }
3008
3009 private:
3010 Thread* const self_;
3011 // How many more frames to skip.
3012 uint32_t skip_depth_;
3013 // Current position down stack trace.
3014 uint32_t count_ = 0;
3015 // An object array where the first element is a pointer array that contains the `ArtMethod`
3016 // pointers on the stack and dex PCs. The rest of the elements are referencing objects
3017 // that shall keep the methods alive, namely the declaring class of the `ArtMethod` for
3018 // declared methods and the class loader for copied methods (because it's faster to find
3019 // the class loader than the actual class that holds the copied method). The `trace_[i+1]`
3020 // contains the declaring class or class loader of the `ArtMethod` of the i'th frame.
3021 // We're initializing a newly allocated trace, so we do not need to record that under
3022 // a transaction. If the transaction is aborted, the whole trace shall be unreachable.
3023 mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
3024 // For cross compilation.
3025 const PointerSize pointer_size_;
3026
3027 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
3028 };
3029
3030 jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
3031 // Compute depth of stack, save frames if possible to avoid needing to recompute many.
3032 constexpr size_t kMaxSavedFrames = 256;
3033 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
3034 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
3035 &saved_frames[0],
3036 kMaxSavedFrames);
3037 count_visitor.WalkStack();
3038 const uint32_t depth = count_visitor.GetDepth();
3039 const uint32_t skip_depth = count_visitor.GetSkipDepth();
3040
3041 // Build internal stack trace.
3042 BuildInternalStackTraceVisitor build_trace_visitor(
3043 soa.Self(), const_cast<Thread*>(this), skip_depth);
3044 if (!build_trace_visitor.Init(depth)) {
3045 return nullptr; // Allocation failed.
3046 }
3047 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
3048 // than doing the stack walk twice.
3049 if (depth < kMaxSavedFrames) {
3050 for (size_t i = 0; i < depth; ++i) {
3051 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
3052 }
3053 } else {
3054 build_trace_visitor.WalkStack();
3055 }
3056
3057 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
3058 if (kIsDebugBuild) {
3059 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
3060 // Second half of trace_methods is dex PCs.
3061 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
3062 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
3063 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
3064 CHECK(method != nullptr);
3065 }
3066 }
3067 return soa.AddLocalReference<jobject>(trace);
3068 }
3069
3070 bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
3071 // Only count the depth since we do not pass a stack frame array as an argument.
3072 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this));
3073 count_visitor.WalkStack();
3074 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth());
3075 }
3076
3077 static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement(
3078 const ScopedObjectAccessAlreadyRunnable& soa,
3079 ArtMethod* method,
3080 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3081 int32_t line_number;
3082 StackHandleScope<3> hs(soa.Self());
3083 auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
3084 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3085 if (method->IsProxyMethod()) {
3086 line_number = -1;
3087 class_name_object.Assign(method->GetDeclaringClass()->GetName());
3088 // source_name_object intentionally left null for proxy methods
3089 } else {
3090 line_number = method->GetLineNumFromDexPC(dex_pc);
3091 // Allocate element, potentially triggering GC
3092 // TODO: reuse class_name_object via Class::name_?
3093 const char* descriptor = method->GetDeclaringClassDescriptor();
3094 CHECK(descriptor != nullptr);
3095 std::string class_name(PrettyDescriptor(descriptor));
3096 class_name_object.Assign(
3097 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
3098 if (class_name_object == nullptr) {
3099 soa.Self()->AssertPendingOOMException();
3100 return nullptr;
3101 }
3102 const char* source_file = method->GetDeclaringClassSourceFile();
3103 if (line_number == -1) {
3104 // Make the line_number field of StackTraceElement hold the dex pc.
3105 // source_name_object is intentionally left null if we failed to map the dex pc to
3106 // a line number (most probably because there is no debug info). See b/30183883.
3107 line_number = static_cast<int32_t>(dex_pc);
3108 } else {
3109 if (source_file != nullptr) {
3110 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3111 if (source_name_object == nullptr) {
3112 soa.Self()->AssertPendingOOMException();
3113 return nullptr;
3114 }
3115 }
3116 }
3117 }
3118 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
3119 CHECK(method_name != nullptr);
3120 Handle<mirror::String> method_name_object(
3121 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3122 if (method_name_object == nullptr) {
3123 return nullptr;
3124 }
3125 return mirror::StackTraceElement::Alloc(soa.Self(),
3126 class_name_object,
3127 method_name_object,
3128 source_name_object,
3129 line_number);
3130 }
3131
3132 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
3133 const ScopedObjectAccessAlreadyRunnable& soa,
3134 jobject internal,
3135 jobjectArray output_array,
3136 int* stack_depth) {
3137 // Decode the internal stack trace into the depth, method trace and PC trace.
3138 // Subtract one for the methods and PC trace.
3139 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3140 DCHECK_GE(depth, 0);
3141
3142 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3143
3144 jobjectArray result;
3145
3146 if (output_array != nullptr) {
3147 // Reuse the array we were given.
3148 result = output_array;
3149 // ...adjusting the number of frames we'll write to not exceed the array length.
3150 const int32_t traces_length =
3151 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength();
3152 depth = std::min(depth, traces_length);
3153 } else {
3154 // Create java_trace array and place in local reference table
3155 ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> java_traces =
3156 class_linker->AllocStackTraceElementArray(soa.Self(), static_cast<size_t>(depth));
3157 if (java_traces == nullptr) {
3158 return nullptr;
3159 }
3160 result = soa.AddLocalReference<jobjectArray>(java_traces);
3161 }
3162
3163 if (stack_depth != nullptr) {
3164 *stack_depth = depth;
3165 }
3166
3167 for (uint32_t i = 0; i < static_cast<uint32_t>(depth); ++i) {
3168 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
3169 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
3170 // Methods and dex PC trace is element 0.
3171 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3172 const ObjPtr<mirror::PointerArray> method_trace =
3173 ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0));
3174 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
3175 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
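// The dex PC for frame i is stored in the second half of the pointer array.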
3176 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3177 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3178 const ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc);
3179 if (obj == nullptr) {
3180 return nullptr;
3181 }
3182 // We are called from native: use non-transactional mode.
3183 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(
3184 static_cast<int32_t>(i), obj);
3185 }
3186 return result;
3187 }
3188
3189 [[nodiscard]] static ObjPtr<mirror::StackFrameInfo> InitStackFrameInfo(
3190 const ScopedObjectAccessAlreadyRunnable& soa,
3191 ClassLinker* class_linker,
3192 Handle<mirror::StackFrameInfo> stackFrameInfo,
3193 ArtMethod* method,
3194 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3195 StackHandleScope<4> hs(soa.Self());
3196 int32_t line_number;
3197 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3198 if (method->IsProxyMethod()) {
3199 line_number = -1;
3200 // source_name_object intentionally left null for proxy methods
3201 } else {
3202 line_number = method->GetLineNumFromDexPC(dex_pc);
3203 if (line_number == -1) {
3204 // Make the line_number field of StackFrameInfo hold the dex pc.
3205 // source_name_object is intentionally left null if we failed to map the dex pc to
3206 // a line number (most probably because there is no debug info). See b/30183883.
3207 line_number = static_cast<int32_t>(dex_pc);
3208 } else {
3209 const char* source_file = method->GetDeclaringClassSourceFile();
3210 if (source_file != nullptr) {
3211 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3212 if (source_name_object == nullptr) {
3213 soa.Self()->AssertPendingOOMException();
3214 return nullptr;
3215 }
3216 }
3217 }
3218 }
3219
3220 Handle<mirror::Class> declaring_class_object(
3221 hs.NewHandle<mirror::Class>(method->GetDeclaringClass()));
3222
3223 ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
3224 const char* method_name = interface_method->GetName();
3225 CHECK(method_name != nullptr);
3226 Handle<mirror::String> method_name_object(
3227 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3228 if (method_name_object == nullptr) {
3229 soa.Self()->AssertPendingOOMException();
3230 return nullptr;
3231 }
3232
3233 dex::ProtoIndex proto_idx =
3234 method->GetDexFile()->GetIndexForProtoId(interface_method->GetPrototype());
3235 Handle<mirror::MethodType> method_type_object(hs.NewHandle<mirror::MethodType>(
3236 class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method)));
3237 if (method_type_object == nullptr) {
3238 soa.Self()->AssertPendingOOMException();
3239 return nullptr;
3240 }
3241
3242 stackFrameInfo->AssignFields(declaring_class_object,
3243 method_type_object,
3244 method_name_object,
3245 source_name_object,
3246 line_number,
3247 static_cast<int32_t>(dex_pc));
3248 return stackFrameInfo.Get();
3249 }
3250
3251 constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY
3252
3253 jint Thread::InternalStackTraceToStackFrameInfoArray(
3254 const ScopedObjectAccessAlreadyRunnable& soa,
3255 jlong mode, // See java.lang.StackStreamFactory for the mode flags
3256 jobject internal,
3257 jint startLevel,
3258 jint batchSize,
3259 jint startBufferIndex,
3260 jobjectArray output_array) {
3261 // Decode the internal stack trace into the depth, method trace and PC trace.
3262 // Subtract one for the methods and PC trace.
3263 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3264 DCHECK_GE(depth, 0);
3265
3266 StackHandleScope<6> hs(soa.Self());
3267 Handle<mirror::ObjectArray<mirror::Object>> framesOrClasses =
3268 hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(output_array));
3269
3270 jint endBufferIndex = startBufferIndex;
3271
3272 if (startLevel < 0 || startLevel >= depth) {
3273 return endBufferIndex;
3274 }
3275
3276 int32_t bufferSize = framesOrClasses->GetLength();
3277 if (startBufferIndex < 0 || startBufferIndex >= bufferSize) {
3278 return endBufferIndex;
3279 }
3280
3281 // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc.
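// When only class references are requested, the buffer is filled with declaring classes
// instead of StackFrameInfo objects (see the loop below).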
3282 bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0;
3283
3284 Handle<mirror::ObjectArray<mirror::Object>> decoded_traces =
3285 hs.NewHandle(soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>());
3286 // Methods and dex PC trace is element 0.
3287 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3288 Handle<mirror::PointerArray> method_trace =
3289 hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)));
3290
3291 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3292 Handle<mirror::Class> sfi_class =
3293 hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;"));
3294 DCHECK(sfi_class != nullptr);
3295
3296 MutableHandle<mirror::StackFrameInfo> frame = hs.NewHandle<mirror::StackFrameInfo>(nullptr);
3297 MutableHandle<mirror::Class> clazz = hs.NewHandle<mirror::Class>(nullptr);
3298 for (uint32_t i = static_cast<uint32_t>(startLevel); i < static_cast<uint32_t>(depth); ++i) {
3299 if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) {
3300 break;
3301 }
3302
3303 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
3304 if (isClassArray) {
3305 clazz.Assign(method->GetDeclaringClass());
3306 framesOrClasses->Set(endBufferIndex, clazz.Get());
3307 } else {
3308 // Prepare parameters for fields in StackFrameInfo
3309 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3310 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3311
3312 ObjPtr<mirror::Object> frameObject = framesOrClasses->Get(endBufferIndex);
3313 // If libcore didn't allocate the object, we just stop here, but it's unlikely.
3314 if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) {
3315 break;
3316 }
3317 frame.Assign(ObjPtr<mirror::StackFrameInfo>::DownCast(frameObject));
3318 frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc));
3319 // Break if InitStackFrameInfo fails to allocate objects or assign the fields.
3320 if (frame == nullptr) {
3321 break;
3322 }
3323 }
3324
3325 ++endBufferIndex;
3326 }
3327
3328 return endBufferIndex;
3329 }
3330
3331 jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
3332 // This code allocates. Do not allow it to operate with a pending exception.
3333 if (IsExceptionPending()) {
3334 return nullptr;
3335 }
3336
3337 class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor {
3338 public:
3339 CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in,
3340 Thread* self,
3341 Context* context)
3342 : MonitorObjectsStackVisitor(self, context),
3343 wait_jobject_(soaa_in.Env(), nullptr),
3344 block_jobject_(soaa_in.Env(), nullptr),
3345 soaa_(soaa_in) {}
3346
3347 protected:
3348 VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
3349 override
3350 REQUIRES_SHARED(Locks::mutator_lock_) {
3351 ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
3352 soaa_, m, GetDexPc(/* abort on error */ false));
3353 if (obj == nullptr) {
3354 return VisitMethodResult::kEndStackWalk;
3355 }
3356 stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr()));
3357 return VisitMethodResult::kContinueMethod;
3358 }
3359
3360 VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
3361 lock_objects_.push_back({});
3362 lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
3363
3364 DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size());
3365
3366 return VisitMethodResult::kContinueMethod;
3367 }
3368
3369 void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED)
3370 override
3371 REQUIRES_SHARED(Locks::mutator_lock_) {
3372 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3373 }
3374 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
3375 override
3376 REQUIRES_SHARED(Locks::mutator_lock_) {
3377 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3378 }
3379 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
3380 ThreadState state ATTRIBUTE_UNUSED,
3381 uint32_t owner_tid ATTRIBUTE_UNUSED)
3382 override
3383 REQUIRES_SHARED(Locks::mutator_lock_) {
3384 block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3385 }
3386 void VisitLockedObject(ObjPtr<mirror::Object> obj)
3387 override
3388 REQUIRES_SHARED(Locks::mutator_lock_) {
3389 frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
3390 }
3391
3392 public:
3393 std::vector<ScopedLocalRef<jobject>> stack_trace_elements_;
3394 ScopedLocalRef<jobject> wait_jobject_;
3395 ScopedLocalRef<jobject> block_jobject_;
3396 std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_;
3397
3398 private:
3399 const ScopedObjectAccessAlreadyRunnable& soaa_;
3400
3401 std::vector<ScopedLocalRef<jobject>> frame_lock_objects_;
3402 };
3403
3404 std::unique_ptr<Context> context(Context::Create());
3405 CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get());
3406 dumper.WalkStack();
3407
3408 // There should not be a pending exception. Otherwise, return with it pending.
3409 if (IsExceptionPending()) {
3410 return nullptr;
3411 }
3412
3413 // Now go and create Java arrays.
3414
3415 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3416
3417 StackHandleScope<6> hs(soa.Self());
3418 Handle<mirror::Class> h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass(
3419 soa.Self(),
3420 "[Ldalvik/system/AnnotatedStackTraceElement;"));
3421 if (h_aste_array_class == nullptr) {
3422 return nullptr;
3423 }
3424 Handle<mirror::Class> h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType());
3425
3426 Handle<mirror::Class> h_o_array_class =
3427 hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker));
3428 DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized.
3429
3430
3431 // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 .
3432 class_linker->EnsureInitialized(soa.Self(),
3433 h_aste_class,
3434 /* can_init_fields= */ true,
3435 /* can_init_parents= */ true);
3436 if (soa.Self()->IsExceptionPending()) {
3437 // This should not fail in a healthy runtime.
3438 return nullptr;
3439 }
3440
3441 ArtField* stack_trace_element_field =
3442 h_aste_class->FindDeclaredInstanceField("stackTraceElement", "Ljava/lang/StackTraceElement;");
3443 DCHECK(stack_trace_element_field != nullptr);
3444 ArtField* held_locks_field =
3445 h_aste_class->FindDeclaredInstanceField("heldLocks", "[Ljava/lang/Object;");
3446 DCHECK(held_locks_field != nullptr);
3447 ArtField* blocked_on_field =
3448 h_aste_class->FindDeclaredInstanceField("blockedOn", "Ljava/lang/Object;");
3449 DCHECK(blocked_on_field != nullptr);
3450
3451 int32_t length = static_cast<int32_t>(dumper.stack_trace_elements_.size());
3452 ObjPtr<mirror::ObjectArray<mirror::Object>> array =
3453 mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length);
3454 if (array == nullptr) {
3455 soa.Self()->AssertPendingOOMException();
3456 return nullptr;
3457 }
3458
3459 ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array));
3460
3461 MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
3462 MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
3463 hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
3464 for (size_t i = 0; i != static_cast<size_t>(length); ++i) {
3465 handle.Assign(h_aste_class->AllocObject(soa.Self()));
3466 if (handle == nullptr) {
3467 soa.Self()->AssertPendingOOMException();
3468 return nullptr;
3469 }
3470
3471 // Set stack trace element.
3472 stack_trace_element_field->SetObject<false>(
3473 handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get()));
3474
3475 // Create locked-on array.
3476 if (!dumper.lock_objects_[i].empty()) {
3477 handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(
3478 soa.Self(), h_o_array_class.Get(), static_cast<int32_t>(dumper.lock_objects_[i].size())));
3479 if (handle2 == nullptr) {
3480 soa.Self()->AssertPendingOOMException();
3481 return nullptr;
3482 }
3483 int32_t j = 0;
3484 for (auto& scoped_local : dumper.lock_objects_[i]) {
3485 if (scoped_local == nullptr) {
3486 continue;
3487 }
3488 handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get()));
3489 DCHECK(!soa.Self()->IsExceptionPending());
3490 j++;
3491 }
3492 held_locks_field->SetObject<false>(handle.Get(), handle2.Get());
3493 }
3494
3495 // Set blocked-on object.
3496 if (i == 0) {
3497 if (dumper.block_jobject_ != nullptr) {
3498 blocked_on_field->SetObject<false>(
3499 handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get()));
3500 }
3501 }
3502
3503 ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
3504 soa.Env()->SetObjectArrayElement(result.get(), static_cast<jsize>(i), elem.get());
3505 DCHECK(!soa.Self()->IsExceptionPending());
3506 }
3507
3508 return result.release();
3509 }
3510
3511 void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
3512 va_list args;
3513 va_start(args, fmt);
3514 ThrowNewExceptionV(exception_class_descriptor, fmt, args);
3515 va_end(args);
3516 }
3517
3518 void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
3519 const char* fmt, va_list ap) {
3520 std::string msg;
3521 StringAppendV(&msg, fmt, ap);
3522 ThrowNewException(exception_class_descriptor, msg.c_str());
3523 }
3524
3525 void Thread::ThrowNewException(const char* exception_class_descriptor,
3526 const char* msg) {
3527 // Callers should either clear or call ThrowNewWrappedException.
3528 AssertNoPendingExceptionForNewException(msg);
3529 ThrowNewWrappedException(exception_class_descriptor, msg);
3530 }
3531
3532 static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self)
3533 REQUIRES_SHARED(Locks::mutator_lock_) {
3534 ArtMethod* method = self->GetCurrentMethod(nullptr);
3535 return method != nullptr
3536 ? method->GetDeclaringClass()->GetClassLoader()
3537 : nullptr;
3538 }
3539
3540 void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
3541 const char* msg) {
3542 DCHECK_EQ(this, Thread::Current());
3543 ScopedObjectAccessUnchecked soa(this);
3544 StackHandleScope<3> hs(soa.Self());
3545
3546 // Disable public sdk checks if we need to throw exceptions.
3547 // The checks are only used in AOT compilation and may block (exception) class
3548 // initialization if it needs access to private fields (e.g. serialVersionUID).
3549 //
3550 // Since throwing an exception will trigger EnsureInitialized() and the public sdk checks may
3551 // block that, disable the checks. It's ok to do so, because the thrown exceptions
3552 // are not part of the application code that needs to be verified.
3553 ScopedDisablePublicSdkChecker sdpsc;
3554
3555 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
3556 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
3557 ClearException();
3558 Runtime* runtime = Runtime::Current();
3559 auto* cl = runtime->GetClassLinker();
3560 Handle<mirror::Class> exception_class(
3561 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
3562 if (UNLIKELY(exception_class == nullptr)) {
3563 CHECK(IsExceptionPending());
3564 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
3565 return;
3566 }
3567
3568 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
3569 true))) {
3570 DCHECK(IsExceptionPending());
3571 return;
3572 }
3573 DCHECK_IMPLIES(runtime->IsStarted(), exception_class->IsThrowableClass());
3574 Handle<mirror::Throwable> exception(
3575 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));
3576
3577 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
3578 if (exception == nullptr) {
3579 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3580 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingException());
3581 return;
3582 }
3583
3584 // Choose an appropriate constructor and set up the arguments.
3585 const char* signature;
3586 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
3587 if (msg != nullptr) {
3588 // Ensure we remember this and the method over the String allocation.
3589 msg_string.reset(
3590 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
3591 if (UNLIKELY(msg_string.get() == nullptr)) {
3592 CHECK(IsExceptionPending()); // OOME.
3593 return;
3594 }
3595 if (cause.get() == nullptr) {
3596 signature = "(Ljava/lang/String;)V";
3597 } else {
3598 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
3599 }
3600 } else {
3601 if (cause.get() == nullptr) {
3602 signature = "()V";
3603 } else {
3604 signature = "(Ljava/lang/Throwable;)V";
3605 }
3606 }
3607 ArtMethod* exception_init_method =
3608 exception_class->FindConstructor(signature, cl->GetImagePointerSize());
3609
3610 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
3611 << PrettyDescriptor(exception_class_descriptor);
3612
3613 if (UNLIKELY(!runtime->IsStarted())) {
3614 // Something is trying to throw an exception without a started runtime, which is the common
3615 // case in the compiler. We won't be able to invoke the constructor of the exception, so set
3616 // the exception fields directly.
3617 if (msg != nullptr) {
3618 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString());
3619 }
3620 if (cause.get() != nullptr) {
3621 exception->SetCause(DecodeJObject(cause.get())->AsThrowable());
3622 }
3623 ScopedLocalRef<jobject> trace(GetJniEnv(), CreateInternalStackTrace(soa));
3624 if (trace.get() != nullptr) {
3625 exception->SetStackState(DecodeJObject(trace.get()).Ptr());
3626 }
3627 SetException(exception.Get());
3628 } else {
3629 jvalue jv_args[2];
3630 size_t i = 0;
3631
3632 if (msg != nullptr) {
3633 jv_args[i].l = msg_string.get();
3634 ++i;
3635 }
3636 if (cause.get() != nullptr) {
3637 jv_args[i].l = cause.get();
3638 ++i;
3639 }
3640 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
3641 InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args);
3642 if (LIKELY(!IsExceptionPending())) {
3643 SetException(exception.Get());
3644 }
3645 }
3646 }
3647
3648 void Thread::ThrowOutOfMemoryError(const char* msg) {
3649 LOG(WARNING) << "Throwing OutOfMemoryError "
3650 << '"' << msg << '"'
3651 << " (VmSize " << GetProcessStatus("VmSize")
3652 << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")");
3653 ScopedTrace trace("OutOfMemoryError");
3654 if (!tls32_.throwing_OutOfMemoryError) {
3655 tls32_.throwing_OutOfMemoryError = true;
3656 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
3657 tls32_.throwing_OutOfMemoryError = false;
3658 } else {
3659 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3660 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME());
3661 }
3662 }
3663
3664 Thread* Thread::CurrentFromGdb() {
3665 return Thread::Current();
3666 }
3667
3668 void Thread::DumpFromGdb() const {
3669 std::ostringstream ss;
3670 Dump(ss);
3671 std::string str(ss.str());
3672 // log to stderr for debugging command line processes
3673 std::cerr << str;
3674 #ifdef ART_TARGET_ANDROID
3675 // log to logcat for debugging frameworks processes
3676 LOG(INFO) << str;
3677 #endif
3678 }
3679
3680 // Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
3681 template
3682 void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
3683 template
3684 void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);
3685
3686 template<PointerSize ptr_size>
3687 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
3688 #define DO_THREAD_OFFSET(x, y) \
3689 if (offset == (x).Uint32Value()) { \
3690 os << (y); \
3691 return; \
3692 }
3693 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
3694 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
3695 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
3696 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
3697 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
3698 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
3699 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
3700 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
3701 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking")
3702 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
3703 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
3704 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
3705 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
3706 #undef DO_THREAD_OFFSET
3707
3708 #define JNI_ENTRY_POINT_INFO(x) \
3709 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3710 os << #x; \
3711 return; \
3712 }
3713 JNI_ENTRY_POINT_INFO(pDlsymLookup)
3714 JNI_ENTRY_POINT_INFO(pDlsymLookupCritical)
3715 #undef JNI_ENTRY_POINT_INFO
3716
3717 #define QUICK_ENTRY_POINT_INFO(x) \
3718 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3719 os << #x; \
3720 return; \
3721 }
3722 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
3723 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8)
3724 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16)
3725 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32)
3726 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64)
3727 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
3728 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
3729 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
3730 QUICK_ENTRY_POINT_INFO(pAllocStringObject)
3731 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
3732 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
3733 QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
3734 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
3735 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
3736 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
3737 QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess)
3738 QUICK_ENTRY_POINT_INFO(pResolveType)
3739 QUICK_ENTRY_POINT_INFO(pResolveString)
3740 QUICK_ENTRY_POINT_INFO(pSet8Instance)
3741 QUICK_ENTRY_POINT_INFO(pSet8Static)
3742 QUICK_ENTRY_POINT_INFO(pSet16Instance)
3743 QUICK_ENTRY_POINT_INFO(pSet16Static)
3744 QUICK_ENTRY_POINT_INFO(pSet32Instance)
3745 QUICK_ENTRY_POINT_INFO(pSet32Static)
3746 QUICK_ENTRY_POINT_INFO(pSet64Instance)
3747 QUICK_ENTRY_POINT_INFO(pSet64Static)
3748 QUICK_ENTRY_POINT_INFO(pSetObjInstance)
3749 QUICK_ENTRY_POINT_INFO(pSetObjStatic)
3750 QUICK_ENTRY_POINT_INFO(pGetByteInstance)
3751 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
3752 QUICK_ENTRY_POINT_INFO(pGetByteStatic)
3753 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
3754 QUICK_ENTRY_POINT_INFO(pGetShortInstance)
3755 QUICK_ENTRY_POINT_INFO(pGetCharInstance)
3756 QUICK_ENTRY_POINT_INFO(pGetShortStatic)
3757 QUICK_ENTRY_POINT_INFO(pGetCharStatic)
3758 QUICK_ENTRY_POINT_INFO(pGet32Instance)
3759 QUICK_ENTRY_POINT_INFO(pGet32Static)
3760 QUICK_ENTRY_POINT_INFO(pGet64Instance)
3761 QUICK_ENTRY_POINT_INFO(pGet64Static)
3762 QUICK_ENTRY_POINT_INFO(pGetObjInstance)
3763 QUICK_ENTRY_POINT_INFO(pGetObjStatic)
3764 QUICK_ENTRY_POINT_INFO(pAputObject)
3765 QUICK_ENTRY_POINT_INFO(pJniMethodStart)
3766 QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
3767 QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook)
3768 QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult)
3769 QUICK_ENTRY_POINT_INFO(pJniLockObject)
3770 QUICK_ENTRY_POINT_INFO(pJniUnlockObject)
3771 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
3772 QUICK_ENTRY_POINT_INFO(pLockObject)
3773 QUICK_ENTRY_POINT_INFO(pUnlockObject)
3774 QUICK_ENTRY_POINT_INFO(pCmpgDouble)
3775 QUICK_ENTRY_POINT_INFO(pCmpgFloat)
3776 QUICK_ENTRY_POINT_INFO(pCmplDouble)
3777 QUICK_ENTRY_POINT_INFO(pCmplFloat)
3778 QUICK_ENTRY_POINT_INFO(pCos)
3779 QUICK_ENTRY_POINT_INFO(pSin)
3780 QUICK_ENTRY_POINT_INFO(pAcos)
3781 QUICK_ENTRY_POINT_INFO(pAsin)
3782 QUICK_ENTRY_POINT_INFO(pAtan)
3783 QUICK_ENTRY_POINT_INFO(pAtan2)
3784 QUICK_ENTRY_POINT_INFO(pCbrt)
3785 QUICK_ENTRY_POINT_INFO(pCosh)
3786 QUICK_ENTRY_POINT_INFO(pExp)
3787 QUICK_ENTRY_POINT_INFO(pExpm1)
3788 QUICK_ENTRY_POINT_INFO(pHypot)
3789 QUICK_ENTRY_POINT_INFO(pLog)
3790 QUICK_ENTRY_POINT_INFO(pLog10)
3791 QUICK_ENTRY_POINT_INFO(pNextAfter)
3792 QUICK_ENTRY_POINT_INFO(pSinh)
3793 QUICK_ENTRY_POINT_INFO(pTan)
3794 QUICK_ENTRY_POINT_INFO(pTanh)
3795 QUICK_ENTRY_POINT_INFO(pFmod)
3796 QUICK_ENTRY_POINT_INFO(pL2d)
3797 QUICK_ENTRY_POINT_INFO(pFmodf)
3798 QUICK_ENTRY_POINT_INFO(pL2f)
3799 QUICK_ENTRY_POINT_INFO(pD2iz)
3800 QUICK_ENTRY_POINT_INFO(pF2iz)
3801 QUICK_ENTRY_POINT_INFO(pIdivmod)
3802 QUICK_ENTRY_POINT_INFO(pD2l)
3803 QUICK_ENTRY_POINT_INFO(pF2l)
3804 QUICK_ENTRY_POINT_INFO(pLdiv)
3805 QUICK_ENTRY_POINT_INFO(pLmod)
3806 QUICK_ENTRY_POINT_INFO(pLmul)
3807 QUICK_ENTRY_POINT_INFO(pShlLong)
3808 QUICK_ENTRY_POINT_INFO(pShrLong)
3809 QUICK_ENTRY_POINT_INFO(pUshrLong)
3810 QUICK_ENTRY_POINT_INFO(pIndexOf)
3811 QUICK_ENTRY_POINT_INFO(pStringCompareTo)
3812 QUICK_ENTRY_POINT_INFO(pMemcpy)
3813 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
3814 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
3815 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
3816 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
3817 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
3818 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
3819 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
3820 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
3821 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
3822 QUICK_ENTRY_POINT_INFO(pTestSuspend)
3823 QUICK_ENTRY_POINT_INFO(pDeliverException)
3824 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
3825 QUICK_ENTRY_POINT_INFO(pThrowDivZero)
3826 QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
3827 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
3828 QUICK_ENTRY_POINT_INFO(pDeoptimize)
3829 QUICK_ENTRY_POINT_INFO(pA64Load)
3830 QUICK_ENTRY_POINT_INFO(pA64Store)
3831 QUICK_ENTRY_POINT_INFO(pNewEmptyString)
3832 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
3833 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BB)
3834 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
3835 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
3836 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
3837 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
3838 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
3839 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
3840 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
3841 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
3842 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
3843 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
3844 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
3845 QUICK_ENTRY_POINT_INFO(pNewStringFromString)
3846 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
3847 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
3848 QUICK_ENTRY_POINT_INFO(pNewStringFromUtf16Bytes_BII)
3849 QUICK_ENTRY_POINT_INFO(pJniReadBarrier)
3850 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
3851 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
3852 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02)
3853 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03)
3854 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04)
3855 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05)
3856 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06)
3857 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07)
3858 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08)
3859 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09)
3860 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10)
3861 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11)
3862 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12)
3863 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13)
3864 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14)
3865 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15)
3866 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16)
3867 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17)
3868 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18)
3869 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19)
3870 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20)
3871 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21)
3872 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22)
3873 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23)
3874 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24)
3875 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25)
3876 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26)
3877 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
3878 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
3879 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
3880 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
3881 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
3882 #undef QUICK_ENTRY_POINT_INFO
3883
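// Unknown offset: fall back to printing the raw value.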
3884 os << offset;
3885 }
3886
3887 void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
3888 // Get exception from thread.
3889 ObjPtr<mirror::Throwable> exception = GetException();
3890 CHECK(exception != nullptr);
3891 if (exception == GetDeoptimizationException()) {
3892 // This wasn't a real exception, so just clear it here. If there was an actual exception it
3893 // will be recorded in the DeoptimizationContext and it will be restored later.
3894 ClearException();
3895 artDeoptimize(this, skip_method_exit_callbacks);
3896 UNREACHABLE();
3897 }
3898
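// With a read barrier (concurrent copying) collector this asserts that the exception
// reference already points to to-space; otherwise it is a no-op.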
3899 ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
3900
3901 // This is a real exception: let the instrumentation know about it. Exception throw listener
3902 // could set a breakpoint or install listeners that might require a deoptimization. Hence the
3903 // deoptimization check needs to happen after calling the listener.
3904 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3905 if (instrumentation->HasExceptionThrownListeners() &&
3906 IsExceptionThrownByCurrentMethod(exception)) {
3907 // Instrumentation may cause GC so keep the exception object safe.
3908 StackHandleScope<1> hs(this);
3909 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
3910 instrumentation->ExceptionThrownEvent(this, exception);
3911 }
3912 // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
3913 // Note: we do this *after* reporting the exception to instrumentation in case it now requires
3914 // deoptimization. It may happen if a debugger is attached and requests new events (single-step,
3915 // breakpoint, ...) when the exception is reported.
3916 // Frame pop can be requested on a method unwind callback which requires a deopt. We could
3917 // potentially check after each unwind callback to see if a frame pop was requested and deopt if
3918 // needed. Since this is a debug only feature and this path is only taken when an exception is
3919 // thrown, it is not performance critical and we keep it simple by just deopting if method exit
3920 // listeners are installed and frame pop feature is supported.
3921 bool needs_deopt =
3922 instrumentation->HasMethodExitListeners() && Runtime::Current()->AreNonStandardExitsEnabled();
3923 if (Dbg::IsForcedInterpreterNeededForException(this) || IsForceInterpreter() || needs_deopt) {
3924 NthCallerVisitor visitor(this, 0, false);
3925 visitor.WalkStack();
3926 if (visitor.GetCurrentQuickFrame() != nullptr) {
3927 if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) {
3928 // method_type shouldn't matter due to exception handling.
3929 const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
3930 // Save the exception into the deoptimization context so it can be restored
3931 // before entering the interpreter.
3932 PushDeoptimizationContext(
3933 JValue(),
3934 /* is_reference= */ false,
3935 exception,
3936 /* from_code= */ false,
3937 method_type);
3938 artDeoptimize(this, skip_method_exit_callbacks);
3939 UNREACHABLE();
3940 } else {
3941 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
3942 << visitor.caller->PrettyMethod();
3943 }
3944 } else {
3945 // This is either top of call stack, or shadow frame.
3946 DCHECK(visitor.caller == nullptr || visitor.IsShadowFrame());
3947 }
3948 }
3949
3950 // Don't leave exception visible while we try to find the handler, which may cause class
3951 // resolution.
3952 ClearException();
3953 QuickExceptionHandler exception_handler(this, false);
3954 exception_handler.FindCatch(exception, skip_method_exit_callbacks);
3955 if (exception_handler.GetClearException()) {
3956 // Exception was cleared as part of delivery.
3957 DCHECK(!IsExceptionPending());
3958 } else {
3959 // Exception was put back with a throw location.
3960 DCHECK(IsExceptionPending());
3961 // Check the to-space invariant on the re-installed exception (if applicable).
3962 ReadBarrier::MaybeAssertToSpaceInvariant(GetException());
3963 }
3964 exception_handler.DoLongJump();
3965 }
3966
3967 Context* Thread::GetLongJumpContext() {
3968 Context* result = tlsPtr_.long_jump_context;
3969 if (result == nullptr) {
3970 result = Context::Create();
3971 } else {
3972 tlsPtr_.long_jump_context = nullptr; // Avoid context being shared.
3973 result->Reset();
3974 }
3975 return result;
3976 }
3977
3978 ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
3979 bool check_suspended,
3980 bool abort_on_error) const {
3981 // Note: this visitor may return with a method set, but with dex_pc_ set to dex::kDexNoIndex. This is
3982 // so we don't abort in a special situation (thinlocked monitor) when dumping the Java
3983 // stack.
3984 ArtMethod* method = nullptr;
3985 uint32_t dex_pc = dex::kDexNoIndex;
3986 StackVisitor::WalkStack(
3987 [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
3988 ArtMethod* m = visitor->GetMethod();
3989 if (m->IsRuntimeMethod()) {
3990 // Continue if this is a runtime method.
3991 return true;
3992 }
3993 method = m;
3994 dex_pc = visitor->GetDexPc(abort_on_error);
3995 return false;
3996 },
3997 const_cast<Thread*>(this),
3998 /* context= */ nullptr,
3999 StackVisitor::StackWalkKind::kIncludeInlinedFrames,
4000 check_suspended);
4001
4002 if (dex_pc_out != nullptr) {
4003 *dex_pc_out = dex_pc;
4004 }
4005 return method;
4006 }
4007
4008 bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
4009 return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
4010 }
4011
4012 extern std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
4013 REQUIRES_SHARED(Locks::mutator_lock_);
4014
4015 // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
4016 template <typename RootVisitor, bool kPrecise = false>
4017 class ReferenceMapVisitor : public StackVisitor {
4018 public:
4019 ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
4020 REQUIRES_SHARED(Locks::mutator_lock_)
4021 // We are visiting the references in compiled frames, so we do not need
4022 // to know the inlined frames.
4023 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
4024 visitor_(visitor),
4025 visit_declaring_class_(!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {}
4026
4027 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
4028 if (false) {
4029 LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
4030 << StringPrintf("@ PC:%04x", GetDexPc());
4031 }
4032 ShadowFrame* shadow_frame = GetCurrentShadowFrame();
4033 if (shadow_frame != nullptr) {
4034 VisitShadowFrame(shadow_frame);
4035 } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) {
4036 VisitNterpFrame();
4037 } else {
4038 VisitQuickFrame();
4039 }
4040 return true;
4041 }
4042
4043 void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
4044 ArtMethod* m = shadow_frame->GetMethod();
4045 VisitDeclaringClass(m);
4046 DCHECK(m != nullptr);
4047 size_t num_regs = shadow_frame->NumberOfVRegs();
4048 // handle scope for JNI or References for interpreter.
4049 for (size_t reg = 0; reg < num_regs; ++reg) {
4050 mirror::Object* ref = shadow_frame->GetVRegReference(reg);
4051 if (ref != nullptr) {
4052 mirror::Object* new_ref = ref;
4053 visitor_(&new_ref, reg, this);
4054 if (new_ref != ref) {
4055 shadow_frame->SetVRegReference(reg, new_ref);
4056 }
4057 }
4058 }
4059 // Mark lock count map required for structured locking checks.
4060 shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
4061 }
4062
4063 private:
4064 // Visiting the declaring class is necessary so that we don't unload the class of a method that
4065 // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
4066 // the threads do not all hold the heap bitmap lock for parallel GC.
4067 void VisitDeclaringClass(ArtMethod* method)
4068 REQUIRES_SHARED(Locks::mutator_lock_)
4069 NO_THREAD_SAFETY_ANALYSIS {
4070 if (!visit_declaring_class_) {
4071 return;
4072 }
4073 ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
4074 // klass can be null for runtime methods.
4075 if (klass != nullptr) {
4076 if (kVerifyImageObjectsMarked) {
4077 gc::Heap* const heap = Runtime::Current()->GetHeap();
4078 gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
4079 /*fail_ok=*/true);
4080 if (space != nullptr && space->IsImageSpace()) {
4081 bool failed = false;
4082 if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
4083 failed = true;
4084 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
4085 } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
4086 failed = true;
4087 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
4088 }
4089 if (failed) {
4090 GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
4091 space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
4092 LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
4093 << " klass@" << klass.Ptr();
4094 // Pretty info last in case it crashes.
4095 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass "
4096 << klass->PrettyClass();
4097 }
4098 }
4099 }
4100 mirror::Object* new_ref = klass.Ptr();
4101 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this);
4102 if (new_ref != klass) {
4103 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
4104 }
4105 }
4106 }
4107
4108 void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
4109 ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
4110 StackReference<mirror::Object>* vreg_ref_base =
4111 reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame));
4112 StackReference<mirror::Object>* vreg_int_base =
4113 reinterpret_cast<StackReference<mirror::Object>*>(NterpGetRegistersArray(cur_quick_frame));
4114 CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData());
4115 const uint16_t num_regs = accessor.RegistersSize();
4116 // An nterp frame has two arrays: a dex register array and a reference array
4117 // that shadows the dex register array but contains only references
4118 // (non-reference dex registers have nulls). See nterp_helpers.cc.
4119 for (size_t reg = 0; reg < num_regs; ++reg) {
4120 StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg;
4121 mirror::Object* ref = ref_addr->AsMirrorPtr();
4122 if (ref != nullptr) {
4123 mirror::Object* new_ref = ref;
4124 visitor_(&new_ref, reg, this);
4125 if (new_ref != ref) {
4126 ref_addr->Assign(new_ref);
4127 StackReference<mirror::Object>* int_addr = vreg_int_base + reg;
4128 int_addr->Assign(new_ref);
4129 }
4130 }
4131 }
4132 }
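// Illustrative nterp frame contents for num_regs == 3 where only v1 holds a reference:
//   registers array:  [ v0: int ][ v1: ref ][ v2: int ]
//   reference array:  [   null  ][ v1: ref ][   null  ]
// A reference moved by the GC therefore has to be written back to both arrays, as done
// in the loop above.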
4133
4134 template <typename T>
4135 ALWAYS_INLINE
4136 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) {
4137 ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
4138 DCHECK(cur_quick_frame != nullptr);
4139 ArtMethod* m = *cur_quick_frame;
4140 VisitDeclaringClass(m);
4141
4142 if (m->IsNative()) {
4143 // TODO: Spill the `this` reference in the AOT-compiled String.charAt()
4144 // slow-path for throwing SIOOBE, so that we can remove this carve-out.
4145 if (UNLIKELY(m->IsIntrinsic()) &&
4146 m->GetIntrinsic() == enum_cast<uint32_t>(Intrinsics::kStringCharAt)) {
4147 // The String.charAt() method is AOT-compiled with an intrinsic implementation
4148 // instead of a JNI stub. It has a slow path that constructs a runtime frame
4149 // for throwing SIOOBE and in that path we do not get the `this` pointer
4150 // spilled on the stack, so there is nothing to visit. We can distinguish
4151 // this from the GenericJni path by checking that the PC is in the boot image
4152 // (PC shall be known thanks to the runtime frame for throwing SIOOBE).
4153 // Note that JIT does not emit that intrinsic implementation.
4154 const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
4155 if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
4156 return;
4157 }
4158 }
4159 // Native methods spill their arguments to the reserved vregs in the caller's frame
4160 // and use pointers to these stack references as jobject, jclass, jarray, etc.
4161 // Note: We can come here for a @CriticalNative method when it needs to resolve the
4162 // target native function but there would be no references to visit below.
4163 const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
4164 const size_t method_pointer_size = static_cast<size_t>(kRuntimePointerSize);
4165 uint32_t* current_vreg = reinterpret_cast<uint32_t*>(
4166 reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size + method_pointer_size);
4167 auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
4168 auto* ref_addr = reinterpret_cast<StackReference<mirror::Object>*>(current_vreg);
4169 mirror::Object* ref = ref_addr->AsMirrorPtr();
4170 if (ref != nullptr) {
4171 mirror::Object* new_ref = ref;
4172 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this);
4173 if (ref != new_ref) {
4174 ref_addr->Assign(new_ref);
4175 }
4176 }
4177 };
4178 const char* shorty = m->GetShorty();
4179 if (!m->IsStatic()) {
4180 visit();
4181 current_vreg += 1u;
4182 }
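// Example: for a non-static native method with shorty "VLJI", the `this` slot was just
// visited above; the loop below then visits the 'L' argument and merely steps over the
// 'J' argument (two vreg slots) and the 'I' argument (one slot).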
4183 for (shorty += 1u; *shorty != 0; ++shorty) {
4184 switch (*shorty) {
4185 case 'D':
4186 case 'J':
4187 current_vreg += 2u;
4188 break;
4189 case 'L':
4190 visit();
4191 FALLTHROUGH_INTENDED;
4192 default:
4193 current_vreg += 1u;
4194 break;
4195 }
4196 }
4197 } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
4198 // Process register map (which native, runtime and proxy methods don't have)
4199 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
4200 DCHECK(method_header->IsOptimized());
4201 StackReference<mirror::Object>* vreg_base =
4202 reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
4203 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
4204 CodeInfo code_info = kPrecise
4205 ? CodeInfo(method_header) // We will need dex register maps.
4206 : CodeInfo::DecodeGcMasksOnly(method_header);
4207 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
4208 DCHECK(map.IsValid());
4209
4210 T vreg_info(m, code_info, map, visitor_);
4211
4212 // Visit stack entries that hold pointers.
4213 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
4214 for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) {
4215 if (stack_mask.LoadBit(i)) {
4216 StackReference<mirror::Object>* ref_addr = vreg_base + i;
4217 mirror::Object* ref = ref_addr->AsMirrorPtr();
4218 if (ref != nullptr) {
4219 mirror::Object* new_ref = ref;
4220 vreg_info.VisitStack(&new_ref, i, this);
4221 if (ref != new_ref) {
4222 ref_addr->Assign(new_ref);
4223 }
4224 }
4225 }
4226 }
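// Each set bit i in the stack mask corresponds to the StackReference slot at
// vreg_base + i, i.e. byte offset i * kFrameSlotSize from the frame's ArtMethod**;
// the precise path's StackMapVRegInfo maps that offset back to a dex register.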
4227 // Visit callee-save registers that hold pointers.
4228 uint32_t register_mask = code_info.GetRegisterMaskOf(map);
4229 for (uint32_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
4230 if (register_mask & (1 << i)) {
4231 mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
4232 if (kIsDebugBuild && ref_addr == nullptr) {
4233 std::string thread_name;
4234 GetThread()->GetThreadName(thread_name);
4235 LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
4236 DescribeStack(GetThread());
4237 LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
4238 << "set in register_mask=" << register_mask << " at " << DescribeLocation();
4239 }
4240 if (*ref_addr != nullptr) {
4241 vreg_info.VisitRegister(ref_addr, i, this);
4242 }
4243 }
4244 }
4245 } else if (!m->IsRuntimeMethod() && m->IsProxyMethod()) {
4246 // If this is a proxy method, visit its reference arguments.
4247 DCHECK(!m->IsStatic());
4248 DCHECK(!m->IsNative());
4249 std::vector<StackReference<mirror::Object>*> ref_addrs =
4250 GetProxyReferenceArguments(cur_quick_frame);
4251 for (StackReference<mirror::Object>* ref_addr : ref_addrs) {
4252 mirror::Object* ref = ref_addr->AsMirrorPtr();
4253 if (ref != nullptr) {
4254 mirror::Object* new_ref = ref;
4255 visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this);
4256 if (ref != new_ref) {
4257 ref_addr->Assign(new_ref);
4258 }
4259 }
4260 }
4261 }
4262 }
4263
4264 void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
4265 if (kPrecise) {
4266 VisitQuickFramePrecise();
4267 } else {
4268 VisitQuickFrameNonPrecise();
4269 }
4270 }
4271
4272 void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
4273 struct UndefinedVRegInfo {
4274 UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
4275 const CodeInfo& code_info ATTRIBUTE_UNUSED,
4276 const StackMap& map ATTRIBUTE_UNUSED,
4277 RootVisitor& _visitor)
4278 : visitor(_visitor) {
4279 }
4280
4281 ALWAYS_INLINE
4282 void VisitStack(mirror::Object** ref,
4283 size_t stack_index ATTRIBUTE_UNUSED,
4284 const StackVisitor* stack_visitor)
4285 REQUIRES_SHARED(Locks::mutator_lock_) {
4286 visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
4287 }
4288
4289 ALWAYS_INLINE
4290 void VisitRegister(mirror::Object** ref,
4291 size_t register_index ATTRIBUTE_UNUSED,
4292 const StackVisitor* stack_visitor)
4293 REQUIRES_SHARED(Locks::mutator_lock_) {
4294 visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
4295 }
4296
4297 RootVisitor& visitor;
4298 };
4299 VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
4300 }
4301
4302 void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
4303 struct StackMapVRegInfo {
4304 StackMapVRegInfo(ArtMethod* method,
4305 const CodeInfo& _code_info,
4306 const StackMap& map,
4307 RootVisitor& _visitor)
4308 : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
4309 code_info(_code_info),
4310 dex_register_map(code_info.GetDexRegisterMapOf(map)),
4311 visitor(_visitor) {
4312 DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
4313 }
4314
4315 // TODO: If necessary, we should consider caching a reverse map instead of the linear
4316 // lookups for each location.
4317 void FindWithType(const size_t index,
4318 const DexRegisterLocation::Kind kind,
4319 mirror::Object** ref,
4320 const StackVisitor* stack_visitor)
4321 REQUIRES_SHARED(Locks::mutator_lock_) {
4322 bool found = false;
4323 for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
4324 DexRegisterLocation location = dex_register_map[dex_reg];
4325 if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
4326 visitor(ref, dex_reg, stack_visitor);
4327 found = true;
4328 }
4329 }
4330
4331 if (!found) {
4332 // If nothing found, report with unknown.
4333 visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor);
4334 }
4335 }
4336
4337 void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
4338 REQUIRES_SHARED(Locks::mutator_lock_) {
4339 const size_t stack_offset = stack_index * kFrameSlotSize;
4340 FindWithType(stack_offset,
4341 DexRegisterLocation::Kind::kInStack,
4342 ref,
4343 stack_visitor);
4344 }
4345
4346 void VisitRegister(mirror::Object** ref,
4347 size_t register_index,
4348 const StackVisitor* stack_visitor)
4349 REQUIRES_SHARED(Locks::mutator_lock_) {
4350 FindWithType(register_index,
4351 DexRegisterLocation::Kind::kInRegister,
4352 ref,
4353 stack_visitor);
4354 }
4355
4356 size_t number_of_dex_registers;
4357 const CodeInfo& code_info;
4358 DexRegisterMap dex_register_map;
4359 RootVisitor& visitor;
4360 };
4361 VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
4362 }
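// kPrecise trades speed for attribution: the non-precise path above only decodes the GC
// masks and reports every root as kImpreciseVreg, while the precise path decodes the dex
// register map so each root can be attributed to its dex register (or kUnknownVreg).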
4363
4364 // Visitor for when we visit a root.
4365 RootVisitor& visitor_;
4366 bool visit_declaring_class_;
4367 };
4368
4369 class RootCallbackVisitor {
4370 public:
4371 RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
4372
4373 void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
4374 REQUIRES_SHARED(Locks::mutator_lock_) {
4375 visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
4376 }
4377
4378 private:
4379 RootVisitor* const visitor_;
4380 const uint32_t tid_;
4381 };
4382
4383 void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
4384 for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
4385 brhs != nullptr;
4386 brhs = brhs->GetLink()) {
4387 brhs->VisitTargets(visitor);
4388 }
4389 }
4390
4391 // FIXME: clang-r433403 reports the below function exceeds frame size limit.
4392 // http://b/197647048
4393 #pragma GCC diagnostic push
4394 #pragma GCC diagnostic ignored "-Wframe-larger-than="
4395 template <bool kPrecise>
4396 void Thread::VisitRoots(RootVisitor* visitor) {
4397 const uint32_t thread_id = GetThreadId();
4398 visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
4399 if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
4400 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
4401 RootInfo(kRootNativeStack, thread_id));
4402 }
4403 if (tlsPtr_.async_exception != nullptr) {
4404 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.async_exception),
4405 RootInfo(kRootNativeStack, thread_id));
4406 }
4407 visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
4408 tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id));
4409 tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
4410 HandleScopeVisitRoots(visitor, thread_id);
4411 // Visit roots for deoptimization.
4412 if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
4413 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4414 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
4415 for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
4416 record != nullptr;
4417 record = record->GetLink()) {
4418 for (ShadowFrame* shadow_frame = record->GetShadowFrame();
4419 shadow_frame != nullptr;
4420 shadow_frame = shadow_frame->GetLink()) {
4421 mapper.VisitShadowFrame(shadow_frame);
4422 }
4423 }
4424 }
4425 for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
4426 record != nullptr;
4427 record = record->GetLink()) {
4428 if (record->IsReference()) {
4429 visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
4430 RootInfo(kRootThreadObject, thread_id));
4431 }
4432 visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
4433 RootInfo(kRootThreadObject, thread_id));
4434 }
4435 if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
4436 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4437 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
4438 for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
4439 record != nullptr;
4440 record = record->GetNext()) {
4441 mapper.VisitShadowFrame(record->GetShadowFrame());
4442 }
4443 }
4444 for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
4445 verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
4446 }
4447 // Visit roots on this thread's stack
4448 RuntimeContextType context;
4449 RootCallbackVisitor visitor_to_callback(visitor, thread_id);
4450 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
4451 mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
4452 }
4453 #pragma GCC diagnostic pop
4454
4455 static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, size_t* value)
4456 REQUIRES_SHARED(Locks::mutator_lock_) {
4457 if (inst == nullptr) {
4458 return;
4459 }
4460 using Opcode = Instruction::Code;
4461 Opcode opcode = inst->Opcode();
4462 switch (opcode) {
4463 case Opcode::NEW_INSTANCE:
4464 case Opcode::CHECK_CAST:
4465 case Opcode::INSTANCE_OF:
4466 case Opcode::NEW_ARRAY:
4467 case Opcode::CONST_CLASS: {
4468 mirror::Class* klass = reinterpret_cast<mirror::Class*>(*value);
4469 if (klass == nullptr || klass == Runtime::GetWeakClassSentinel()) {
4470 return;
4471 }
4472 mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
4473 if (new_klass == nullptr) {
4474 *value = reinterpret_cast<size_t>(Runtime::GetWeakClassSentinel());
4475 } else if (new_klass != klass) {
4476 *value = reinterpret_cast<size_t>(new_klass);
4477 }
4478 return;
4479 }
4480 case Opcode::CONST_STRING:
4481 case Opcode::CONST_STRING_JUMBO: {
4482 mirror::Object* object = reinterpret_cast<mirror::Object*>(*value);
4483 if (object == nullptr) {
4484 return;
4485 }
4486 mirror::Object* new_object = visitor->IsMarked(object);
4487 // We know the string is marked because it's a strongly-interned string that
4488 // is always alive (see b/117621117 for trying to make those strings weak).
4489 if (kIsDebugBuild && new_object == nullptr) {
4490 // (b/275005060) Currently the problem is reported only on CC GC.
4491 // Therefore we log it with more information. But since the failure rate
4492 // is quite high, the reporting is sampled.
4493 if (gUseReadBarrier) {
4494 Runtime* runtime = Runtime::Current();
4495 gc::collector::ConcurrentCopying* cc = runtime->GetHeap()->ConcurrentCopyingCollector();
4496 CHECK_NE(cc, nullptr);
4497 LOG(FATAL) << cc->DumpReferenceInfo(object, "string")
4498 << " string interned: " << std::boolalpha
4499 << runtime->GetInternTable()->LookupStrong(Thread::Current(),
4500 down_cast<mirror::String*>(object))
4501 << std::noboolalpha;
4502 } else {
4503 // Other GCs
4504 LOG(FATAL) << __FUNCTION__
4505 << ": IsMarked returned null for a strongly interned string: " << object;
4506 }
4507 } else if (new_object != object) {
4508 *value = reinterpret_cast<size_t>(new_object);
4509 }
4510 return;
4511 }
4512 default:
4513 // The following opcode ranges store non-reference values.
4514 if ((Opcode::IGET <= opcode && opcode <= Opcode::SPUT_SHORT) ||
4515 (Opcode::INVOKE_VIRTUAL <= opcode && opcode <= Opcode::INVOKE_INTERFACE_RANGE)) {
4516 return; // Nothing to do for the GC.
4517 }
4518 // A new opcode is using the cache; it needs to be handled explicitly in this method.
4519 DCHECK(false) << "Unhandled opcode " << inst->Opcode();
4520 }
4521 }
4522
4523 void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
4524 for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
4525 SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
4526 }
4527 }
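// The interpreter cache maps dex instruction pointers to opcode-specific payloads; only
// the class and string opcodes handled in SweepCacheEntry() hold GC references, while
// field and invoke opcodes cache values the GC can ignore.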
4528
4529 // FIXME: clang-r433403 reports the below function exceeds frame size limit.
4530 // http://b/197647048
4531 #pragma GCC diagnostic push
4532 #pragma GCC diagnostic ignored "-Wframe-larger-than="
4533 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
4534 if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
4535 VisitRoots</* kPrecise= */ true>(visitor);
4536 } else {
4537 VisitRoots</* kPrecise= */ false>(visitor);
4538 }
4539 }
4540 #pragma GCC diagnostic pop
4541
4542 class VerifyRootVisitor : public SingleRootVisitor {
4543 public:
4544 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
4545 override REQUIRES_SHARED(Locks::mutator_lock_) {
4546 VerifyObject(root);
4547 }
4548 };
4549
4550 void Thread::VerifyStackImpl() {
4551 if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
4552 VerifyRootVisitor visitor;
4553 std::unique_ptr<Context> context(Context::Create());
4554 RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
4555 ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
4556 mapper.WalkStack();
4557 }
4558 }
4559
4560 // Set the stack end to the value to be used during a stack overflow.
4561 void Thread::SetStackEndForStackOverflow() {
4562 // During stack overflow we allow use of the full stack.
4563 if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
4564 // However, we seem to have already extended to use the full stack.
4565 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
4566 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
4567 DumpStack(LOG_STREAM(ERROR));
4568 LOG(FATAL) << "Recursive stack overflow.";
4569 }
4570
4571 tlsPtr_.stack_end = tlsPtr_.stack_begin;
4572
4573 // Remove the stack overflow protection if it is set up.
4574 bool implicit_stack_check = Runtime::Current()->GetImplicitStackOverflowChecks();
4575 if (implicit_stack_check) {
4576 if (!UnprotectStack()) {
4577 LOG(ERROR) << "Unable to remove stack protection for stack overflow";
4578 }
4579 }
4580 }
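// With stack_end lowered to stack_begin, subsequent stack-overflow checks succeed
// throughout the reserved region, giving the runtime headroom to construct and deliver
// the StackOverflowError.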
4581
4582 void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
4583 DCHECK_LE(start, end);
4584 DCHECK_LE(end, limit);
4585 tlsPtr_.thread_local_start = start;
4586 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
4587 tlsPtr_.thread_local_end = end;
4588 tlsPtr_.thread_local_limit = limit;
4589 tlsPtr_.thread_local_objects = 0;
4590 }
4591
4592 void Thread::ResetTlab() {
4593 gc::Heap* const heap = Runtime::Current()->GetHeap();
4594 if (heap->GetHeapSampler().IsEnabled()) {
4595 // Note: We always ResetTlab before SetTlab, therefore we can do the sample
4596 // offset adjustment here.
4597 heap->AdjustSampleOffset(GetTlabPosOffset());
4598 VLOG(heap) << "JHP: ResetTlab, Tid: " << GetTid()
4599 << " adjustment = "
4600 << (tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start);
4601 }
4602 SetTlab(nullptr, nullptr, nullptr);
4603 }
4604
4605 bool Thread::HasTlab() const {
4606 const bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
4607 if (has_tlab) {
4608 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
4609 } else {
4610 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
4611 }
4612 return has_tlab;
4613 }
4614
4615 void Thread::AdjustTlab(size_t slide_bytes) {
4616 if (HasTlab()) {
4617 tlsPtr_.thread_local_start -= slide_bytes;
4618 tlsPtr_.thread_local_pos -= slide_bytes;
4619 tlsPtr_.thread_local_end -= slide_bytes;
4620 tlsPtr_.thread_local_limit -= slide_bytes;
4621 }
4622 }
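// All four TLAB pointers slide together here because the memory backing the TLAB itself
// has been moved down by `slide_bytes` (e.g. by a compacting GC).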
4623
4624 std::ostream& operator<<(std::ostream& os, const Thread& thread) {
4625 thread.ShortDump(os);
4626 return os;
4627 }
4628
4629 bool Thread::ProtectStack(bool fatal_on_error) {
4630 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
4631 VLOG(threads) << "Protecting stack at " << pregion;
4632 if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
4633 if (fatal_on_error) {
4634 // b/249586057, LOG(FATAL) times out
4635 LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. "
4636 "Reason: "
4637 << strerror(errno) << " size: " << kStackOverflowProtectedSize;
4638 exit(1);
4639 }
4640 return false;
4641 }
4642 return true;
4643 }
4644
4645 bool Thread::UnprotectStack() {
4646 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
4647 VLOG(threads) << "Unprotecting stack at " << pregion;
4648 return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
4649 }
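// Guard region layout (the stack grows down towards stack_begin):
//   [stack_begin - kStackOverflowProtectedSize, stack_begin) : PROT_NONE guard region
//   [stack_begin, stack top)                                 : usable stack
// An implicit stack-overflow check that touches below stack_begin faults in the guard
// region, and the fault handler converts that into a StackOverflowError.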
4650
4651 void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
4652 verifier->link_ = tlsPtr_.method_verifier;
4653 tlsPtr_.method_verifier = verifier;
4654 }
4655
4656 void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
4657 CHECK_EQ(tlsPtr_.method_verifier, verifier);
4658 tlsPtr_.method_verifier = verifier->link_;
4659 }
4660
4661 size_t Thread::NumberOfHeldMutexes() const {
4662 size_t count = 0;
4663 for (BaseMutex* mu : tlsPtr_.held_mutexes) {
4664 count += mu != nullptr ? 1 : 0;
4665 }
4666 return count;
4667 }
4668
4669 void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
4670 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
4671 ClearException();
4672 ObjPtr<mirror::Throwable> pending_exception;
4673 bool from_code = false;
4674 DeoptimizationMethodType method_type;
4675 PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type);
4676 SetTopOfStack(nullptr);
4677
4678 // Restore the exception that was pending before deoptimization then interpret the
4679 // deoptimized frames.
4680 if (pending_exception != nullptr) {
4681 SetException(pending_exception);
4682 }
4683
4684 ShadowFrame* shadow_frame = MaybePopDeoptimizedStackedShadowFrame();
4685 // We may not have a shadow frame if we deoptimized at the return of the
4686 // quick_to_interpreter_bridge which got directly called by art_quick_invoke_stub.
4687 if (shadow_frame != nullptr) {
4688 SetTopOfShadowStack(shadow_frame);
4689 interpreter::EnterInterpreterFromDeoptimize(this,
4690 shadow_frame,
4691 result,
4692 from_code,
4693 method_type);
4694 }
4695 }
4696
4697 void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
4698 CHECK(new_exception != nullptr);
4699 Runtime::Current()->SetAsyncExceptionsThrown();
4700 if (kIsDebugBuild) {
4701 // Make sure we are in a checkpoint.
4702 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
4703 CHECK(this == Thread::Current() || GetSuspendCount() >= 1)
4704 << "It doesn't look like this was called in a checkpoint! this: "
4705 << this << " count: " << GetSuspendCount();
4706 }
4707 tlsPtr_.async_exception = new_exception.Ptr();
4708 }
4709
4710 bool Thread::ObserveAsyncException() {
4711 DCHECK(this == Thread::Current());
4712 if (tlsPtr_.async_exception != nullptr) {
4713 if (tlsPtr_.exception != nullptr) {
4714 LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: "
4715 << tlsPtr_.exception->Dump();
4716 LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump();
4717 }
4718 tlsPtr_.exception = tlsPtr_.async_exception;
4719 tlsPtr_.async_exception = nullptr;
4720 return true;
4721 } else {
4722 return IsExceptionPending();
4723 }
4724 }
4725
4726 void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
4727 CHECK(new_exception != nullptr);
4728 // TODO: DCHECK(!IsExceptionPending());
4729 tlsPtr_.exception = new_exception.Ptr();
4730 }
4731
4732 bool Thread::IsAotCompiler() {
4733 return Runtime::Current()->IsAotCompiler();
4734 }
4735
4736 mirror::Object* Thread::GetPeerFromOtherThread() const {
4737 DCHECK(tlsPtr_.jpeer == nullptr);
4738 mirror::Object* peer = tlsPtr_.opeer;
4739 if (gUseReadBarrier && Current()->GetIsGcMarking()) {
4740 // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
4741 // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly
4742 // mark/forward it here.
4743 peer = art::ReadBarrier::Mark(peer);
4744 }
4745 return peer;
4746 }
4747
4748 void Thread::SetReadBarrierEntrypoints() {
4749 // Make sure entrypoints aren't null.
4750 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true);
4751 }
4752
4753 void Thread::ClearAllInterpreterCaches() {
4754 static struct ClearInterpreterCacheClosure : Closure {
4755 void Run(Thread* thread) override {
4756 thread->GetInterpreterCache()->Clear(thread);
4757 }
4758 } closure;
4759 Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
4760 }
4761
4762
4763 void Thread::ReleaseLongJumpContextInternal() {
4764 // Each QuickExceptionHandler gets a long jump context and uses
4765 // it for doing the long jump, after finding catch blocks/doing deoptimization.
4766 // Both finding catch blocks and deoptimization can trigger another
4767 // exception such as a result of class loading. So there can be nested
4768 // cases of exception handling and multiple contexts being used.
4769 // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
4770 // for reuse so there is no need to allocate a new one each time a context is
4771 // needed. Since we only keep one context for reuse, delete the existing one as
4772 // the passed-in context is yet to be used for the long jump.
4773 delete tlsPtr_.long_jump_context;
4774 }
4775
4776 void Thread::SetNativePriority(int new_priority) {
4777 palette_status_t status = PaletteSchedSetPriority(GetTid(), new_priority);
4778 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
4779 }
4780
4781 int Thread::GetNativePriority() const {
4782 int priority = 0;
4783 palette_status_t status = PaletteSchedGetPriority(GetTid(), &priority);
4784 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
4785 return priority;
4786 }
4787
4788 bool Thread::IsSystemDaemon() const {
4789 if (GetPeer() == nullptr) {
4790 return false;
4791 }
4792 return WellKnownClasses::java_lang_Thread_systemDaemon->GetBoolean(GetPeer());
4793 }
4794
4795 std::string Thread::StateAndFlagsAsHexString() const {
4796 std::stringstream result_stream;
4797 result_stream << std::hex << GetStateAndFlags(std::memory_order_relaxed).GetValue();
4798 return result_stream.str();
4799 }
4800
4801 ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
4802 : self_(self), hs_(self_), excp_(hs_.NewHandle<art::mirror::Throwable>(self_->GetException())) {
4803 self_->ClearException();
4804 }
4805
4806 void ScopedExceptionStorage::SuppressOldException(const char* message) {
4807 CHECK(self_->IsExceptionPending()) << *self_;
4808 ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
4809 excp_.Assign(self_->GetException());
4810 if (old_suppressed != nullptr) {
4811 LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
4812 }
4813 self_->ClearException();
4814 }
4815
4816 ScopedExceptionStorage::~ScopedExceptionStorage() {
4817 CHECK(!self_->IsExceptionPending()) << *self_;
4818 if (!excp_.IsNull()) {
4819 self_->SetException(excp_.Get());
4820 }
4821 }
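// Typical use: ScopedExceptionStorage stashes the currently pending exception, the caller
// runs code that may itself throw (optionally folding a new exception in via
// SuppressOldException()), and the destructor re-installs the stored exception once no
// other exception is pending.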
4822
4823 } // namespace art
4824
4825 #pragma clang diagnostic pop // -Wconversion
4826