1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "thread.h"
18
19 #include <limits.h> // for INT_MAX
20 #include <pthread.h>
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <sys/resource.h>
24 #include <sys/time.h>
25
26 #include <algorithm>
27 #include <atomic>
28 #include <bitset>
29 #include <cerrno>
30 #include <iostream>
31 #include <list>
32 #include <optional>
33 #include <sstream>
34
35 #include "android-base/file.h"
36 #include "android-base/stringprintf.h"
37 #include "android-base/strings.h"
38
39 #include "unwindstack/AndroidUnwinder.h"
40
41 #include "arch/context-inl.h"
42 #include "arch/context.h"
43 #include "art_field-inl.h"
44 #include "art_method-inl.h"
45 #include "base/atomic.h"
46 #include "base/bit_utils.h"
47 #include "base/casts.h"
48 #include "base/file_utils.h"
49 #include "base/memory_tool.h"
50 #include "base/mutex.h"
51 #include "base/stl_util.h"
52 #include "base/systrace.h"
53 #include "base/time_utils.h"
54 #include "base/timing_logger.h"
55 #include "base/to_str.h"
56 #include "base/utils.h"
57 #include "class_linker-inl.h"
58 #include "class_root-inl.h"
59 #include "com_android_art_flags.h"
60 #include "debugger.h"
61 #include "dex/descriptors_names.h"
62 #include "dex/dex_file-inl.h"
63 #include "dex/dex_file_annotations.h"
64 #include "dex/dex_file_types.h"
65 #include "entrypoints/entrypoint_utils.h"
66 #include "entrypoints/quick/quick_alloc_entrypoints.h"
67 #include "entrypoints/quick/runtime_entrypoints_list.h"
68 #include "gc/accounting/card_table-inl.h"
69 #include "gc/accounting/heap_bitmap-inl.h"
70 #include "gc/allocator/rosalloc.h"
71 #include "gc/heap.h"
72 #include "gc/space/space-inl.h"
73 #include "gc_root.h"
74 #include "handle_scope-inl.h"
75 #include "indirect_reference_table-inl.h"
76 #include "instrumentation.h"
77 #include "intern_table.h"
78 #include "interpreter/interpreter.h"
79 #include "interpreter/shadow_frame-inl.h"
80 #include "java_frame_root_info.h"
81 #include "jni/java_vm_ext.h"
82 #include "jni/jni_internal.h"
83 #include "mirror/class-alloc-inl.h"
84 #include "mirror/class_loader.h"
85 #include "mirror/object_array-alloc-inl.h"
86 #include "mirror/object_array-inl.h"
87 #include "mirror/stack_frame_info.h"
88 #include "mirror/stack_trace_element.h"
89 #include "monitor.h"
90 #include "monitor_objects_stack_visitor.h"
91 #include "native_stack_dump.h"
92 #include "nativehelper/scoped_local_ref.h"
93 #include "nativehelper/scoped_utf_chars.h"
94 #include "nterp_helpers.h"
95 #include "nth_caller_visitor.h"
96 #include "oat/oat_quick_method_header.h"
97 #include "oat/stack_map.h"
98 #include "obj_ptr-inl.h"
99 #include "object_lock.h"
100 #include "palette/palette.h"
101 #include "quick/quick_method_frame_info.h"
102 #include "quick_exception_handler.h"
103 #include "read_barrier-inl.h"
104 #include "reflection.h"
105 #include "reflective_handle_scope-inl.h"
106 #include "runtime-inl.h"
107 #include "runtime.h"
108 #include "runtime_callbacks.h"
109 #include "scoped_thread_state_change-inl.h"
110 #include "scoped_disable_public_sdk_checker.h"
111 #include "stack.h"
112 #include "thread-inl.h"
113 #include "thread_list.h"
114 #include "trace.h"
115 #include "trace_profile.h"
116 #include "verify_object.h"
117 #include "well_known_classes-inl.h"
118
119 #ifdef ART_TARGET_ANDROID
120 #include <android/set_abort_message.h>
121 #endif
122
123 #if ART_USE_FUTEXES
124 #include <linux/futex.h>
125 #include <sys/syscall.h>
126 #endif // ART_USE_FUTEXES
127
128 #pragma clang diagnostic push
129 #pragma clang diagnostic error "-Wconversion"
130
131 extern "C" __attribute__((weak)) void* __hwasan_tag_pointer(const volatile void* p,
132 unsigned char tag);
133
134 namespace art_flags = com::android::art::flags;
135
136 namespace art HIDDEN {
137
138 using android::base::StringAppendV;
139 using android::base::StringPrintf;
140
141 bool Thread::is_started_ = false;
142 pthread_key_t Thread::pthread_key_self_;
143 ConditionVariable* Thread::resume_cond_ = nullptr;
144 const size_t Thread::kStackOverflowImplicitCheckSize =
145 GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
146 bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
147 Thread* Thread::jit_sensitive_thread_ = nullptr;
148 std::atomic<Mutex*> Thread::cp_placeholder_mutex_(nullptr);
149 #ifndef __BIONIC__
150 thread_local Thread* Thread::self_tls_ = nullptr;
151 #endif
152
153 static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
154
155 static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
156
157 void Thread::InitCardTable() {
158 tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
159 }
160
161 static void UnimplementedEntryPoint() {
162 UNIMPLEMENTED(FATAL);
163 }
164
165 void InitEntryPoints(JniEntryPoints* jpoints,
166 QuickEntryPoints* qpoints,
167 bool monitor_jni_entry_exit);
168 void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
169 void UpdateLowOverheadTraceEntrypoints(QuickEntryPoints* qpoints, LowOverheadTraceType trace_type);
170
171 void Thread::UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType trace_type) {
172 UpdateLowOverheadTraceEntrypoints(&tlsPtr_.quick_entrypoints, trace_type);
173 }
174
175 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
176 CHECK(gUseReadBarrier);
177 tls32_.is_gc_marking = is_marking;
178 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
179 }
180
181 void Thread::InitTlsEntryPoints() {
182 ScopedTrace trace("InitTlsEntryPoints");
183 // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
184 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
185 uintptr_t* end = reinterpret_cast<uintptr_t*>(
186 reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
187 for (uintptr_t* it = begin; it != end; ++it) {
188 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
189 }
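// Note: the single pass above assumes jni_entrypoints and quick_entrypoints are adjacent
// members of tlsPtr_, so every slot from the first JNI entrypoint through the last quick
// entrypoint receives the placeholder.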
190 bool monitor_jni_entry_exit = false;
191 PaletteShouldReportJniInvocations(&monitor_jni_entry_exit);
192 if (monitor_jni_entry_exit) {
193 AtomicSetFlag(ThreadFlag::kMonitorJniEntryExit);
194 }
195 InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints, monitor_jni_entry_exit);
196 }
197
198 void Thread::ResetQuickAllocEntryPointsForThread() {
199 ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
200 }
201
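// Record of everything needed to resume after a deoptimization: the return value of the
// frame being deoptimized, any pending exception, and how the deoptimization was requested.
// Records form a stack linked through link_ and are pushed/popped via
// Thread::PushDeoptimizationContext() and Thread::PopDeoptimizationContext() below.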
202 class DeoptimizationContextRecord {
203 public:
204 DeoptimizationContextRecord(const JValue& ret_val,
205 bool is_reference,
206 bool from_code,
207 ObjPtr<mirror::Throwable> pending_exception,
208 DeoptimizationMethodType method_type,
209 DeoptimizationContextRecord* link)
210 : ret_val_(ret_val),
211 is_reference_(is_reference),
212 from_code_(from_code),
213 pending_exception_(pending_exception.Ptr()),
214 deopt_method_type_(method_type),
215 link_(link) {}
216
217 JValue GetReturnValue() const { return ret_val_; }
218 bool IsReference() const { return is_reference_; }
219 bool GetFromCode() const { return from_code_; }
220 ObjPtr<mirror::Throwable> GetPendingException() const REQUIRES_SHARED(Locks::mutator_lock_) {
221 return pending_exception_;
222 }
223 DeoptimizationContextRecord* GetLink() const { return link_; }
224 mirror::Object** GetReturnValueAsGCRoot() {
225 DCHECK(is_reference_);
226 return ret_val_.GetGCRoot();
227 }
228 mirror::Object** GetPendingExceptionAsGCRoot() {
229 return reinterpret_cast<mirror::Object**>(&pending_exception_);
230 }
231 DeoptimizationMethodType GetDeoptimizationMethodType() const {
232 return deopt_method_type_;
233 }
234
235 private:
236 // The value returned by the method at the top of the stack before deoptimization.
237 JValue ret_val_;
238
239 // Indicates whether the returned value is a reference. If so, the GC will visit it.
240 const bool is_reference_;
241
242 // Whether the context was created from an explicit deoptimization in the code.
243 const bool from_code_;
244
245 // The exception that was pending before deoptimization (or null if there was no pending
246 // exception).
247 mirror::Throwable* pending_exception_;
248
249 // Whether the context was created for an (idempotent) runtime method.
250 const DeoptimizationMethodType deopt_method_type_;
251
252 // A link to the previous DeoptimizationContextRecord.
253 DeoptimizationContextRecord* const link_;
254
255 DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
256 };
257
258 class StackedShadowFrameRecord {
259 public:
260 StackedShadowFrameRecord(ShadowFrame* shadow_frame,
261 StackedShadowFrameType type,
262 StackedShadowFrameRecord* link)
263 : shadow_frame_(shadow_frame),
264 type_(type),
265 link_(link) {}
266
267 ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
268 StackedShadowFrameType GetType() const { return type_; }
269 StackedShadowFrameRecord* GetLink() const { return link_; }
270
271 private:
272 ShadowFrame* const shadow_frame_;
273 const StackedShadowFrameType type_;
274 StackedShadowFrameRecord* const link_;
275
276 DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
277 };
278
279 void Thread::PushDeoptimizationContext(const JValue& return_value,
280 bool is_reference,
281 ObjPtr<mirror::Throwable> exception,
282 bool from_code,
283 DeoptimizationMethodType method_type) {
284 DCHECK(exception != Thread::GetDeoptimizationException());
285 DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
286 return_value,
287 is_reference,
288 from_code,
289 exception,
290 method_type,
291 tlsPtr_.deoptimization_context_stack);
292 tlsPtr_.deoptimization_context_stack = record;
293 }
294
295 void Thread::PopDeoptimizationContext(JValue* result,
296 ObjPtr<mirror::Throwable>* exception,
297 bool* from_code,
298 DeoptimizationMethodType* method_type) {
299 AssertHasDeoptimizationContext();
300 DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
301 tlsPtr_.deoptimization_context_stack = record->GetLink();
302 result->SetJ(record->GetReturnValue().GetJ());
303 *exception = record->GetPendingException();
304 *from_code = record->GetFromCode();
305 *method_type = record->GetDeoptimizationMethodType();
306 delete record;
307 }
308
309 void Thread::AssertHasDeoptimizationContext() {
310 CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
311 << "No deoptimization context for thread " << *this;
312 }
313
314 enum {
315 kPermitAvailable = 0, // Incrementing consumes the permit
316 kNoPermit = 1, // Incrementing marks as waiter waiting
317 kNoPermitWaiterWaiting = 2
318 };
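// Protocol sketch (see Park()/Unpark() below, futex builds only): Park() increments the
// state; if the old value was kPermitAvailable the permit has been consumed and Park()
// returns immediately, otherwise the state is now kNoPermitWaiterWaiting and the thread
// futex-waits on it. Unpark() exchanges the state back to kPermitAvailable and issues a
// FUTEX_WAKE if a waiter had registered.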
319
320 void Thread::Park(bool is_absolute, int64_t time) {
321 DCHECK(this == Thread::Current());
322 #if ART_USE_FUTEXES
323 // Consume the permit, or mark as waiting. This cannot cause park_state to go
324 // outside of its valid range (0, 1, 2), because in all cases where 2 is
325 // assigned it is set back to 1 before returning, and this method cannot run
326 // concurrently with itself since it operates on the current thread.
327 int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
328 if (old_state == kNoPermit) {
329 // no permit was available. block thread until later.
330 Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
331 bool timed_out = false;
332 if (!is_absolute && time == 0) {
333 // Thread.getState() is documented to return waiting for untimed parks.
334 ScopedThreadSuspension sts(this, ThreadState::kWaiting);
335 DCHECK_EQ(NumberOfHeldMutexes(), 0u);
336 int result = futex(tls32_.park_state_.Address(),
337 FUTEX_WAIT_PRIVATE,
338 /* sleep if val = */ kNoPermitWaiterWaiting,
339 /* timeout */ nullptr,
340 nullptr,
341 0);
342 // This errno check must happen before the scope is closed, to ensure that
343 // no destructors (such as ScopedThreadSuspension) overwrite errno.
344 if (result == -1) {
345 switch (errno) {
346 case EAGAIN:
347 FALLTHROUGH_INTENDED;
348 case EINTR: break; // park() is allowed to spuriously return
349 default: PLOG(FATAL) << "Failed to park";
350 }
351 }
352 } else if (time > 0) {
353 // Only actually suspend and futex_wait if we're going to wait for some
354 // positive amount of time - the kernel will reject negative times with
355 // EINVAL, and a zero time will just noop.
356
357 // Thread.getState() is documented to return timed wait for timed parks.
358 ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
359 DCHECK_EQ(NumberOfHeldMutexes(), 0u);
360 timespec timespec;
361 int result = 0;
362 if (is_absolute) {
363 // Time is millis when scheduled for an absolute time
364 timespec.tv_nsec = (time % 1000) * 1000000;
365 timespec.tv_sec = SaturatedTimeT(time / 1000);
366 // This odd looking pattern is recommended by futex documentation to
367 // wait until an absolute deadline, with otherwise identical behavior to
368 // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
369 // correct time when the system clock changes.
370 result = futex(tls32_.park_state_.Address(),
371 FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
372 /* sleep if val = */ kNoPermitWaiterWaiting,
373 &timespec,
374 nullptr,
375 static_cast<int>(FUTEX_BITSET_MATCH_ANY));
376 } else {
377 // Time is nanos when scheduled for a relative time
378 timespec.tv_sec = SaturatedTimeT(time / 1000000000);
379 timespec.tv_nsec = time % 1000000000;
380 result = futex(tls32_.park_state_.Address(),
381 FUTEX_WAIT_PRIVATE,
382 /* sleep if val = */ kNoPermitWaiterWaiting,
383 &timespec,
384 nullptr,
385 0);
386 }
387 // This errno check must happen before the scope is closed, to ensure that
388 // no destructors (such as ScopedThreadSuspension) overwrite errno.
389 if (result == -1) {
390 switch (errno) {
391 case ETIMEDOUT:
392 timed_out = true;
393 FALLTHROUGH_INTENDED;
394 case EAGAIN:
395 case EINTR: break; // park() is allowed to spuriously return
396 default: PLOG(FATAL) << "Failed to park";
397 }
398 }
399 }
400 // Mark as no longer waiting, and consume permit if there is one.
401 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
402 // TODO: Call to signal jvmti here
403 Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
404 } else {
405 // the fetch_add has consumed the permit. immediately return.
406 DCHECK_EQ(old_state, kPermitAvailable);
407 }
408 #else
409 #pragma clang diagnostic push
410 #pragma clang diagnostic warning "-W#warnings"
411 #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
412 #pragma clang diagnostic pop
413 UNUSED(is_absolute, time);
414 UNIMPLEMENTED(WARNING);
415 sched_yield();
416 #endif
417 }
418
419 void Thread::Unpark() {
420 #if ART_USE_FUTEXES
421 // Set permit available; will be consumed either by fetch_add (when the thread
422 // tries to park) or store (when the parked thread is woken up)
423 if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
424 == kNoPermitWaiterWaiting) {
425 int result = futex(tls32_.park_state_.Address(),
426 FUTEX_WAKE_PRIVATE,
427 /* number of waiters = */ 1,
428 nullptr,
429 nullptr,
430 0);
431 if (result == -1) {
432 PLOG(FATAL) << "Failed to unpark";
433 }
434 }
435 #else
436 UNIMPLEMENTED(WARNING);
437 #endif
438 }
439
440 void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
441 StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
442 sf, type, tlsPtr_.stacked_shadow_frame_record);
443 tlsPtr_.stacked_shadow_frame_record = record;
444 }
445
446 ShadowFrame* Thread::MaybePopDeoptimizedStackedShadowFrame() {
447 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
448 if (record == nullptr ||
449 record->GetType() != StackedShadowFrameType::kDeoptimizationShadowFrame) {
450 return nullptr;
451 }
452 return PopStackedShadowFrame();
453 }
454
455 ShadowFrame* Thread::PopStackedShadowFrame() {
456 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
457 DCHECK_NE(record, nullptr);
458 tlsPtr_.stacked_shadow_frame_record = record->GetLink();
459 ShadowFrame* shadow_frame = record->GetShadowFrame();
460 delete record;
461 return shadow_frame;
462 }
463
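// Node in the singly linked list rooted at tlsPtr_.frame_id_to_shadow_frame, mapping a
// stack frame id to the ShadowFrame the debugger created for it, together with one flag
// per vreg recording whether the debugger has overwritten that register.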
464 class FrameIdToShadowFrame {
465 public:
466 static FrameIdToShadowFrame* Create(size_t frame_id,
467 ShadowFrame* shadow_frame,
468 FrameIdToShadowFrame* next,
469 size_t num_vregs) {
470 // Append a bool array at the end to keep track of what vregs are updated by the debugger.
471 uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
472 return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
473 }
474
475 static void Delete(FrameIdToShadowFrame* f) {
476 uint8_t* memory = reinterpret_cast<uint8_t*>(f);
477 delete[] memory;
478 }
479
480 size_t GetFrameId() const { return frame_id_; }
481 ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
482 FrameIdToShadowFrame* GetNext() const { return next_; }
483 void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
484 bool* GetUpdatedVRegFlags() {
485 return updated_vreg_flags_;
486 }
487
488 private:
489 FrameIdToShadowFrame(size_t frame_id,
490 ShadowFrame* shadow_frame,
491 FrameIdToShadowFrame* next)
492 : frame_id_(frame_id),
493 shadow_frame_(shadow_frame),
494 next_(next) {}
495
496 const size_t frame_id_;
497 ShadowFrame* const shadow_frame_;
498 FrameIdToShadowFrame* next_;
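// Zero-length trailing array: Create() above allocates sizeof(bool) * num_vregs extra
// bytes directly after this object for the per-vreg 'updated' flags, and Delete() frees
// the combined allocation as a raw byte array.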
499 bool updated_vreg_flags_[0];
500
501 DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
502 };
503
504 static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
505 size_t frame_id) {
506 FrameIdToShadowFrame* found = nullptr;
507 for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
508 if (record->GetFrameId() == frame_id) {
509 if (kIsDebugBuild) {
510 // Check we have at most one record for this frame.
511 CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
512 found = record;
513 } else {
514 return record;
515 }
516 }
517 }
518 return found;
519 }
520
521 ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
522 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
523 tlsPtr_.frame_id_to_shadow_frame, frame_id);
524 if (record != nullptr) {
525 return record->GetShadowFrame();
526 }
527 return nullptr;
528 }
529
530 // Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
531 bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
532 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
533 tlsPtr_.frame_id_to_shadow_frame, frame_id);
534 CHECK(record != nullptr);
535 return record->GetUpdatedVRegFlags();
536 }
537
538 ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
539 uint32_t num_vregs,
540 ArtMethod* method,
541 uint32_t dex_pc) {
542 ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
543 if (shadow_frame != nullptr) {
544 return shadow_frame;
545 }
546 VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
547 shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc);
548 FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
549 shadow_frame,
550 tlsPtr_.frame_id_to_shadow_frame,
551 num_vregs);
552 for (uint32_t i = 0; i < num_vregs; i++) {
553 // Do this to clear all references for root visitors.
554 shadow_frame->SetVRegReference(i, nullptr);
555 // This flag will be changed to true if the debugger modifies the value.
556 record->GetUpdatedVRegFlags()[i] = false;
557 }
558 tlsPtr_.frame_id_to_shadow_frame = record;
559 return shadow_frame;
560 }
561
562 TLSData* Thread::GetCustomTLS(const char* key) {
563 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
564 auto it = custom_tls_.find(key);
565 return (it != custom_tls_.end()) ? it->second.get() : nullptr;
566 }
567
568 void Thread::SetCustomTLS(const char* key, TLSData* data) {
569 // We will swap the old data (which might be nullptr) with this and then delete it outside of the
570 // custom_tls_lock_.
571 std::unique_ptr<TLSData> old_data(data);
572 {
573 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
574 custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data);
575 }
576 }
577
578 void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
579 FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
580 if (head->GetFrameId() == frame_id) {
581 tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
582 FrameIdToShadowFrame::Delete(head);
583 return;
584 }
585 FrameIdToShadowFrame* prev = head;
586 for (FrameIdToShadowFrame* record = head->GetNext();
587 record != nullptr;
588 prev = record, record = record->GetNext()) {
589 if (record->GetFrameId() == frame_id) {
590 prev->SetNext(record->GetNext());
591 FrameIdToShadowFrame::Delete(record);
592 return;
593 }
594 }
595 LOG(FATAL) << "No shadow frame for frame " << frame_id;
596 UNREACHABLE();
597 }
598
599 void Thread::InitTid() {
600 tls32_.tid = ::art::GetTid();
601 }
602
603 void Thread::InitAfterFork() {
604 // One thread (us) survived the fork, but we have a new tid so we need to
605 // update the value stashed in this Thread*.
606 InitTid();
607 }
608
609 void Thread::DeleteJPeer(JNIEnv* env) {
610 // Make sure nothing can observe both opeer and jpeer set at the same time.
611 jobject old_jpeer = tlsPtr_.jpeer;
612 CHECK(old_jpeer != nullptr);
613 tlsPtr_.jpeer = nullptr;
614 env->DeleteGlobalRef(old_jpeer);
615 }
616
617 void* Thread::CreateCallbackWithUffdGc(void* arg) {
618 return Thread::CreateCallback(arg);
619 }
620
621 void* Thread::CreateCallback(void* arg) {
622 Thread* self = reinterpret_cast<Thread*>(arg);
623 Runtime* runtime = Runtime::Current();
624 if (runtime == nullptr) {
625 LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
626 return nullptr;
627 }
628 {
629 // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
630 // after self->Init().
631 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
632 // Check that if we got here we cannot be shutting down (as shutdown should never have started
633 // while threads are being born).
634 CHECK(!runtime->IsShuttingDownLocked());
635 // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
636 // a mess in InitStack. We do not have a reasonable way to recover from that, so abort
637 // the runtime in such a case. In case this ever changes, we need to make sure here to
638 // delete the tmp_jni_env, as we own it at this point.
639 CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
640 self->tlsPtr_.tmp_jni_env = nullptr;
641 Runtime::Current()->EndThreadBirth();
642 }
643 {
644 ScopedObjectAccess soa(self);
645 self->InitStringEntryPoints();
646
647 // Copy peer into self, deleting global reference when done.
648 CHECK(self->tlsPtr_.jpeer != nullptr);
649 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
650 // Make sure nothing can observe both opeer and jpeer set at the same time.
651 self->DeleteJPeer(self->GetJniEnv());
652 self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
653
654 ArtField* priorityField = WellKnownClasses::java_lang_Thread_priority;
655 self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
656
657 runtime->GetRuntimeCallbacks()->ThreadStart(self);
658
659 // Unpark ourselves if the java peer was unparked before it started (see
660 // b/28845097#comment49 for more information)
661
662 ArtField* unparkedField = WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
663 bool should_unpark = false;
664 {
665 // Hold the lock here, so that if another thread calls unpark before the thread starts
666 // we don't observe the unparkedBeforeStart field before the unparker writes to it,
667 // which could cause a lost unpark.
668 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
669 should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
670 }
671 if (should_unpark) {
672 self->Unpark();
673 }
674 // Invoke the 'run' method of our java.lang.Thread.
675 ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
676 WellKnownClasses::java_lang_Thread_run->InvokeVirtual<'V'>(self, receiver);
677 }
678 // Detach and delete self.
679 Runtime::Current()->GetThreadList()->Unregister(self, /* should_run_callbacks= */ true);
680
681 return nullptr;
682 }
683
684 Thread* Thread::FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer) {
685 ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
686 Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
687 // Check that if we have a result it is either suspended or we hold the thread_list_lock_
688 // to stop it from going away.
689 if (kIsDebugBuild) {
690 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
691 if (result != nullptr && !result->IsSuspended()) {
692 Locks::thread_list_lock_->AssertHeld(self);
693 }
694 }
695 return result;
696 }
697
698 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
699 jobject java_thread) {
700 return FromManagedThread(soa.Self(), soa.Decode<mirror::Object>(java_thread));
701 }
702
703 static size_t FixStackSize(size_t stack_size) {
704 // A stack size of zero means "use the default".
705 if (stack_size == 0) {
706 stack_size = Runtime::Current()->GetDefaultStackSize();
707 }
708
709 // Dalvik used the bionic pthread default stack size for native threads,
710 // so include that here to support apps that expect large native stacks.
711 stack_size += 1 * MB;
712
713 // Under sanitization, frames of the interpreter may become bigger, both for C code as
714 // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization
715 // of all core classes cannot be done in all test circumstances.
716 if (kMemoryToolIsAvailable) {
717 stack_size = std::max(2 * MB, stack_size);
718 }
719
720 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
721 if (stack_size < PTHREAD_STACK_MIN) {
722 stack_size = PTHREAD_STACK_MIN;
723 }
724
725 if (Runtime::Current()->GetImplicitStackOverflowChecks()) {
726 // If we are going to use implicit stack checks, allocate space for the protected
727 // region at the bottom of the stack.
728 stack_size += Thread::kStackOverflowImplicitCheckSize +
729 GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
730 } else {
731 // It's likely that callers are trying to ensure they have at least a certain amount of
732 // stack space, so we should add our reserved space on top of what they requested, rather
733 // than implicitly take it away from them.
734 stack_size += GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
735 }
736
737 // Some systems require the stack size to be a multiple of the system page size, so round up.
738 stack_size = RoundUp(stack_size, gPageSize);
739
740 return stack_size;
741 }
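// Illustrative sizing (assuming 4 KiB pages, no memory tool, and implicit stack checks
// disabled): a 64 KiB request becomes 64 KiB + 1 MiB of bionic-compatible headroom plus
// the quick-code stack-overflow reserve, rounded up to a whole number of pages.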
742
743 template <>
744 NO_INLINE uint8_t* Thread::FindStackTop<StackType::kHardware>() {
745 return reinterpret_cast<uint8_t*>(
746 AlignDown(__builtin_frame_address(0), gPageSize));
747 }
748
749 // Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
750 // overflow is detected. It is located right below the stack_begin_.
751 template <StackType stack_type>
752 ATTRIBUTE_NO_SANITIZE_ADDRESS
753 void Thread::InstallImplicitProtection() {
754 uint8_t* pregion = GetStackBegin<stack_type>() - GetStackOverflowProtectedSize();
755 // Page containing current top of stack.
756 uint8_t* stack_top = FindStackTop<stack_type>();
757
758 // Try to directly protect the stack.
759 VLOG(threads) << "installing stack protected region at " << std::hex <<
760 static_cast<void*>(pregion) << " to " <<
761 static_cast<void*>(pregion + GetStackOverflowProtectedSize() - 1);
762 if (ProtectStack<stack_type>(/* fatal_on_error= */ false)) {
763 // Tell the kernel that we won't be needing these pages any more.
764 // NB. madvise will probably write zeroes into the memory (on linux it does).
765 size_t unwanted_size =
766 reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
767 madvise(pregion, unwanted_size, MADV_DONTNEED);
768 return;
769 }
770
771 // There is a little complexity here that deserves a special mention. On some
772 // architectures, the stack is created using a VM_GROWSDOWN flag
773 // to prevent memory being allocated when it's not needed. This flag makes the
774 // kernel only allocate memory for the stack by growing down in memory. Because we
775 // want to put an mprotected region far away from that at the stack top, we need
776 // to make sure the pages for the stack are mapped in before we call mprotect.
777 //
778 // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
779 // with a non-mapped stack (usually only the main thread).
780 //
781 // We map in the stack by reading every page from the stack bottom (highest address)
782 // to the stack top. (We then madvise this away.) This must be done by reading from the
783 // current stack pointer downwards.
784 //
785 // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
786 // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
787 // thus have to move the stack pointer. We do this portably by using a recursive function with a
788 // large stack frame size.
789
790 // (Defensively) first remove the protection on the protected region as we'll want to read
791 // and write it. Ignore errors.
792 UnprotectStack<stack_type>();
793
794 VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
795 static_cast<void*>(pregion);
796
797 struct RecurseDownStack {
798 // This function has an intentionally large stack size.
799 #pragma GCC diagnostic push
800 #pragma GCC diagnostic ignored "-Wframe-larger-than="
801 NO_INLINE
802 __attribute__((no_sanitize("memtag"))) static void Touch(uintptr_t target) {
803 volatile size_t zero = 0;
804 // Use a large local volatile array to ensure a large frame size. Do not use anything close
805 // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
806 // there is no pragma support for this.
807 // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
808 constexpr size_t kAsanMultiplier =
809 #ifdef ADDRESS_SANITIZER
810 2u;
811 #else
812 1u;
813 #endif
814 // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
815 // auto-initialize this local variable).
816 volatile char space[gPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
817 [[maybe_unused]] char sink = space[zero];
818 // Remove tag from the pointer. Nop in non-hwasan builds.
819 uintptr_t addr = reinterpret_cast<uintptr_t>(
820 __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
821 if (addr >= target + gPageSize) {
822 Touch(target);
823 }
824 zero *= 2; // Try to avoid tail recursion.
825 }
826 #pragma GCC diagnostic pop
827 };
828 RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));
829
830 VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
831 static_cast<void*>(pregion) << " to " <<
832 static_cast<void*>(pregion + GetStackOverflowProtectedSize() - 1);
833
834 // Protect the bottom of the stack to prevent read/write to it.
835 ProtectStack<stack_type>(/* fatal_on_error= */ true);
836
837 // Tell the kernel that we won't be needing these pages any more.
838 // NB. madvise will probably write zeroes into the memory (on linux it does).
839 size_t unwanted_size =
840 reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
841 madvise(pregion, unwanted_size, MADV_DONTNEED);
842 }
843
844 template <bool kSupportTransaction>
845 static void SetNativePeer(ObjPtr<mirror::Object> java_peer, Thread* thread)
846 REQUIRES_SHARED(Locks::mutator_lock_) {
847 ArtField* field = WellKnownClasses::java_lang_Thread_nativePeer;
848 if (kSupportTransaction && Runtime::Current()->IsActiveTransaction()) {
849 field->SetLong</*kTransactionActive=*/ true>(java_peer, reinterpret_cast<jlong>(thread));
850 } else {
851 field->SetLong</*kTransactionActive=*/ false>(java_peer, reinterpret_cast<jlong>(thread));
852 }
853 }
854
855 static void SetNativePeer(JNIEnv* env, jobject java_peer, Thread* thread) {
856 ScopedObjectAccess soa(env);
857 SetNativePeer</*kSupportTransaction=*/ false>(soa.Decode<mirror::Object>(java_peer), thread);
858 }
859
860 void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
861 CHECK(java_peer != nullptr);
862 Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();
863
864 if (VLOG_IS_ON(threads)) {
865 ScopedObjectAccess soa(env);
866
867 ArtField* f = WellKnownClasses::java_lang_Thread_name;
868 ObjPtr<mirror::String> java_name =
869 f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
870 std::string thread_name;
871 if (java_name != nullptr) {
872 thread_name = java_name->ToModifiedUtf8();
873 } else {
874 thread_name = "(Unnamed)";
875 }
876
877 VLOG(threads) << "Creating native thread for " << thread_name;
878 self->Dump(LOG_STREAM(INFO));
879 }
880
881 Runtime* runtime = Runtime::Current();
882
883 // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
884 bool thread_start_during_shutdown = false;
885 {
886 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
887 if (runtime->IsShuttingDownLocked()) {
888 thread_start_during_shutdown = true;
889 } else {
890 runtime->StartThreadBirth();
891 }
892 }
893 if (thread_start_during_shutdown) {
894 ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
895 env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
896 return;
897 }
898
899 Thread* child_thread = new Thread(is_daemon);
900 // Use global JNI ref to hold peer live while child thread starts.
901 child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
902 stack_size = FixStackSize(stack_size);
903
904 // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
905 // to assign it.
906 SetNativePeer(env, java_peer, child_thread);
907
908 // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
909 // do not have a good way to report this on the child's side.
910 std::string error_msg;
911 std::unique_ptr<JNIEnvExt> child_jni_env_ext(
912 JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));
913
914 int pthread_create_result = 0;
915 if (child_jni_env_ext.get() != nullptr) {
916 pthread_t new_pthread;
917 pthread_attr_t attr;
918 child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
919 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
920 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
921 "PTHREAD_CREATE_DETACHED");
922 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
923 pthread_create_result = pthread_create(&new_pthread,
924 &attr,
925 gUseUserfaultfd ? Thread::CreateCallbackWithUffdGc
926 : Thread::CreateCallback,
927 child_thread);
928 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
929
930 if (pthread_create_result == 0) {
931 // pthread_create started the new thread. The child is now responsible for managing the
932 // JNIEnvExt we created.
933 // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
934 // between the threads.
935 child_jni_env_ext.release(); // NOLINT pthreads API.
936 return;
937 }
938 }
939
940 // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
941 {
942 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
943 runtime->EndThreadBirth();
944 }
945 // Manually delete the global reference since Thread::Init will not have been run. Make sure
946 // nothing can observe both opeer and jpeer set at the same time.
947 child_thread->DeleteJPeer(env);
948 delete child_thread;
949 child_thread = nullptr;
950 // TODO: remove from thread group?
951 SetNativePeer(env, java_peer, nullptr);
952 {
953 std::string msg(child_jni_env_ext.get() == nullptr ?
954 StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
955 StringPrintf("pthread_create (%s stack) failed: %s",
956 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
957 ScopedObjectAccess soa(env);
958 soa.Self()->ThrowOutOfMemoryError(msg.c_str());
959 }
960 }
961
962 static void GetThreadStack(pthread_t thread,
963 void** stack_base,
964 size_t* stack_size,
965 size_t* guard_size) {
966 #if defined(__APPLE__)
967 *stack_size = pthread_get_stacksize_np(thread);
968 void* stack_addr = pthread_get_stackaddr_np(thread);
969
970 // Check whether stack_addr is the base or end of the stack.
971 // (On Mac OS 10.7, it's the end.)
972 int stack_variable;
973 if (stack_addr > &stack_variable) {
974 *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
975 } else {
976 *stack_base = stack_addr;
977 }
978
979 // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
980 pthread_attr_t attributes;
981 CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
982 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
983 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
984 #else
985 pthread_attr_t attributes;
986 CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
987 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
988 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
989 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
990
991 #if defined(__GLIBC__)
992 // If we're the main thread, check whether we were run with an unlimited stack. In that case,
993 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
994 // will be broken because we'll die long before we get close to 2GB.
995 bool is_main_thread = (::art::GetTid() == static_cast<uint32_t>(getpid()));
996 if (is_main_thread) {
997 rlimit stack_limit;
998 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
999 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
1000 }
1001 if (stack_limit.rlim_cur == RLIM_INFINITY) {
1002 size_t old_stack_size = *stack_size;
1003
1004 // Use the kernel default limit as our size, and adjust the base to match.
1005 *stack_size = 8 * MB;
1006 *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
1007
1008 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
1009 << " to " << PrettySize(*stack_size)
1010 << " with base " << *stack_base;
1011 }
1012 }
1013 #endif
1014
1015 #endif
1016 }
1017
1018 bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
1019 // This function does all the initialization that must be run by the native thread it applies to.
1020 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
1021 // we can handshake with the corresponding native thread when it's ready.) Check this native
1022 // thread hasn't been through here already...
1023 CHECK(Thread::Current() == nullptr);
1024
1025 // Set pthread_self ahead of pthread_setspecific, that makes Thread::Current function, this
1026 // avoids pthread_self ever being invalid when discovered from Thread::Current().
1027 tlsPtr_.pthread_self = pthread_self();
1028 CHECK(is_started_);
1029
1030 ScopedTrace trace("Thread::Init");
1031
1032 SetUpAlternateSignalStack();
1033
1034 void* read_stack_base = nullptr;
1035 size_t read_stack_size = 0;
1036 size_t read_guard_size = 0;
1037 GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
1038 if (!InitStack<kNativeStackType>(reinterpret_cast<uint8_t*>(read_stack_base),
1039 read_stack_size,
1040 read_guard_size)) {
1041 return false;
1042 }
1043 InitCpu();
1044 InitTlsEntryPoints();
1045 RemoveSuspendTrigger();
1046 InitCardTable();
1047 InitTid();
1048
1049 #ifdef __BIONIC__
1050 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
1051 #else
1052 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
1053 Thread::self_tls_ = this;
1054 #endif
1055 DCHECK_EQ(Thread::Current(), this);
1056
1057 tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
1058
1059 if (jni_env_ext != nullptr) {
1060 DCHECK_EQ(jni_env_ext->GetVm(), java_vm);
1061 DCHECK_EQ(jni_env_ext->GetSelf(), this);
1062 tlsPtr_.jni_env = jni_env_ext;
1063 } else {
1064 std::string error_msg;
1065 tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
1066 if (tlsPtr_.jni_env == nullptr) {
1067 LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
1068 return false;
1069 }
1070 }
1071
1072 ScopedTrace trace3("ThreadList::Register");
1073 thread_list->Register(this);
1074 if (art_flags::always_enable_profile_code()) {
1075 UpdateTlsLowOverheadTraceEntrypoints(TraceProfiler::GetTraceType());
1076 }
1077 return true;
1078 }
1079
1080 template <typename PeerAction>
1081 Thread* Thread::Attach(const char* thread_name,
1082 bool as_daemon,
1083 PeerAction peer_action,
1084 bool should_run_callbacks) {
1085 Runtime* runtime = Runtime::Current();
1086 ScopedTrace trace("Thread::Attach");
1087 if (runtime == nullptr) {
1088 LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
1089 ((thread_name != nullptr) ? thread_name : "(Unnamed)");
1090 return nullptr;
1091 }
1092 Thread* self;
1093 {
1094 ScopedTrace trace2("Thread birth");
1095 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
1096 if (runtime->IsShuttingDownLocked()) {
1097 LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
1098 ((thread_name != nullptr) ? thread_name : "(Unnamed)");
1099 return nullptr;
1100 } else {
1101 Runtime::Current()->StartThreadBirth();
1102 self = new Thread(as_daemon);
1103 bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
1104 Runtime::Current()->EndThreadBirth();
1105 if (!init_success) {
1106 delete self;
1107 return nullptr;
1108 }
1109 }
1110 }
1111
1112 self->InitStringEntryPoints();
1113
1114 CHECK_NE(self->GetState(), ThreadState::kRunnable);
1115 self->SetState(ThreadState::kNative);
1116
1117 // Run the action that is acting on the peer.
1118 if (!peer_action(self)) {
1119 runtime->GetThreadList()->Unregister(self, should_run_callbacks);
1120 // Unregister deletes self, no need to do this here.
1121 return nullptr;
1122 }
1123
1124 if (VLOG_IS_ON(threads)) {
1125 if (thread_name != nullptr) {
1126 VLOG(threads) << "Attaching thread " << thread_name;
1127 } else {
1128 VLOG(threads) << "Attaching unnamed thread.";
1129 }
1130 ScopedObjectAccess soa(self);
1131 self->Dump(LOG_STREAM(INFO));
1132 }
1133
1134 TraceProfiler::AllocateBuffer(self);
1135 if (should_run_callbacks) {
1136 ScopedObjectAccess soa(self);
1137 runtime->GetRuntimeCallbacks()->ThreadStart(self);
1138 }
1139
1140 return self;
1141 }
1142
1143 Thread* Thread::Attach(const char* thread_name,
1144 bool as_daemon,
1145 jobject thread_group,
1146 bool create_peer,
1147 bool should_run_callbacks) {
1148 auto create_peer_action = [&](Thread* self) {
1149 // If we're the main thread, ClassLinker won't be created until after we're attached,
1150 // so that thread needs a two-stage attach. Regular threads don't need this hack.
1151 // In the compiler, all threads need this hack, because no-one's going to be getting
1152 // a native peer!
1153 if (create_peer) {
1154 self->CreatePeer(thread_name, as_daemon, thread_group);
1155 if (self->IsExceptionPending()) {
1156 // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
1157 // the failure but do not dump the exception details. If we fail to allocate the peer, we
1158 // usually also fail to allocate an exception object and throw a pre-allocated OOME without
1159 // any useful information. If we do manage to allocate the exception object, the memory
1160 // information in the message could have been collected too late and therefore misleading.
1161 {
1162 ScopedObjectAccess soa(self);
1163 LOG(ERROR) << "Exception creating thread peer: "
1164 << ((thread_name != nullptr) ? thread_name : "<null>");
1165 self->ClearException();
1166 }
1167 return false;
1168 }
1169 } else {
1170 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
1171 if (thread_name != nullptr) {
1172 self->SetCachedThreadName(thread_name);
1173 ::art::SetThreadName(thread_name);
1174 } else if (self->GetJniEnv()->IsCheckJniEnabled()) {
1175 LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
1176 }
1177 }
1178 return true;
1179 };
1180 return Attach(thread_name, as_daemon, create_peer_action, should_run_callbacks);
1181 }
1182
1183 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
1184 auto set_peer_action = [&](Thread* self) {
1185 // Install the given peer.
1186 DCHECK(self == Thread::Current());
1187 ScopedObjectAccess soa(self);
1188 ObjPtr<mirror::Object> peer = soa.Decode<mirror::Object>(thread_peer);
1189 self->tlsPtr_.opeer = peer.Ptr();
1190 SetNativePeer</*kSupportTransaction=*/ false>(peer, self);
1191 return true;
1192 };
1193 return Attach(thread_name, as_daemon, set_peer_action, /* should_run_callbacks= */ true);
1194 }
1195
1196 void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
1197 Runtime* runtime = Runtime::Current();
1198 CHECK(runtime->IsStarted());
1199 Thread* self = this;
1200 DCHECK_EQ(self, Thread::Current());
1201
1202 ScopedObjectAccess soa(self);
1203 StackHandleScope<4u> hs(self);
1204 DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
1205 Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
1206 thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
1207 Handle<mirror::String> thread_name = hs.NewHandle(
1208 name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
1209 // Add missing null check in case of OOM b/18297817
1210 if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
1211 CHECK(self->IsExceptionPending());
1212 return;
1213 }
1214 jint thread_priority = GetNativePriority();
1215
1216 DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
1217 Handle<mirror::Object> peer =
1218 hs.NewHandle(WellKnownClasses::java_lang_Thread->AllocObject(self));
1219 if (UNLIKELY(peer == nullptr)) {
1220 CHECK(IsExceptionPending());
1221 return;
1222 }
1223 tlsPtr_.opeer = peer.Get();
1224 WellKnownClasses::java_lang_Thread_init->InvokeInstance<'V', 'L', 'L', 'I', 'Z'>(
1225 self, peer.Get(), thr_group.Get(), thread_name.Get(), thread_priority, as_daemon);
1226 if (self->IsExceptionPending()) {
1227 return;
1228 }
1229
1230 SetNativePeer</*kSupportTransaction=*/ false>(peer.Get(), self);
1231
1232 MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
1233 if (peer_thread_name == nullptr) {
1234 // The Thread constructor should have set the Thread.name to a
1235 // non-null value. However, because we can run without code
1236 // available (in the compiler, in tests), we manually assign the
1237 // fields the constructor should have set.
1238 if (runtime->IsActiveTransaction()) {
1239 InitPeer<true>(tlsPtr_.opeer,
1240 as_daemon,
1241 thr_group.Get(),
1242 thread_name.Get(),
1243 thread_priority);
1244 } else {
1245 InitPeer<false>(tlsPtr_.opeer,
1246 as_daemon,
1247 thr_group.Get(),
1248 thread_name.Get(),
1249 thread_priority);
1250 }
1251 peer_thread_name.Assign(GetThreadName());
1252 }
1253 // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
1254 if (peer_thread_name != nullptr) {
1255 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
1256 }
1257 }
1258
1259 ObjPtr<mirror::Object> Thread::CreateCompileTimePeer(const char* name,
1260 bool as_daemon,
1261 jobject thread_group) {
1262 Runtime* runtime = Runtime::Current();
1263 CHECK(!runtime->IsStarted());
1264 Thread* self = this;
1265 DCHECK_EQ(self, Thread::Current());
1266
1267 ScopedObjectAccessUnchecked soa(self);
1268 StackHandleScope<3u> hs(self);
1269 DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
1270 Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
1271 thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
1272 Handle<mirror::String> thread_name = hs.NewHandle(
1273 name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
1274 // Add missing null check in case of OOM b/18297817
1275 if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
1276 CHECK(self->IsExceptionPending());
1277 return nullptr;
1278 }
1279 jint thread_priority = kNormThreadPriority; // Always normalize to NORM priority.
1280
1281 DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
1282 Handle<mirror::Object> peer = hs.NewHandle(
1283 WellKnownClasses::java_lang_Thread->AllocObject(self));
1284 if (peer == nullptr) {
1285 CHECK(Thread::Current()->IsExceptionPending());
1286 return nullptr;
1287 }
1288
1289 // We cannot call Thread.init, as it will recursively ask for currentThread.
1290
1291 // The Thread constructor should have set the Thread.name to a
1292 // non-null value. However, because we can run without code
1293 // available (in the compiler, in tests), we manually assign the
1294 // fields the constructor should have set.
1295 if (runtime->IsActiveTransaction()) {
1296 InitPeer<true>(peer.Get(),
1297 as_daemon,
1298 thr_group.Get(),
1299 thread_name.Get(),
1300 thread_priority);
1301 } else {
1302 InitPeer<false>(peer.Get(),
1303 as_daemon,
1304 thr_group.Get(),
1305 thread_name.Get(),
1306 thread_priority);
1307 }
1308
1309 return peer.Get();
1310 }
1311
1312 template<bool kTransactionActive>
1313 void Thread::InitPeer(ObjPtr<mirror::Object> peer,
1314 bool as_daemon,
1315 ObjPtr<mirror::Object> thread_group,
1316 ObjPtr<mirror::String> thread_name,
1317 jint thread_priority) {
1318 WellKnownClasses::java_lang_Thread_daemon->SetBoolean<kTransactionActive>(peer,
1319 static_cast<uint8_t>(as_daemon ? 1u : 0u));
1320 WellKnownClasses::java_lang_Thread_group->SetObject<kTransactionActive>(peer, thread_group);
1321 WellKnownClasses::java_lang_Thread_name->SetObject<kTransactionActive>(peer, thread_name);
1322 WellKnownClasses::java_lang_Thread_priority->SetInt<kTransactionActive>(peer, thread_priority);
1323 }
1324
1325 void Thread::SetCachedThreadName(const char* name) {
1326 DCHECK(name != kThreadNameDuringStartup);
1327 const char* old_name = tlsPtr_.name.exchange(name == nullptr ? nullptr : strdup(name));
1328 if (old_name != nullptr && old_name != kThreadNameDuringStartup) {
1329 // Deallocate it, carefully. Note that the load has to be ordered wrt the store of the xchg.
1330 for (uint32_t i = 0; UNLIKELY(tls32_.num_name_readers.load(std::memory_order_seq_cst) != 0);
1331 ++i) {
1332 static constexpr uint32_t kNumSpins = 1000;
1333 // Ugly, but keeps us from having to do anything on the reader side.
1334 if (i > kNumSpins) {
1335 usleep(500);
1336 }
1337 }
1338 // We saw the reader count drop to zero since we replaced the name; old one is now safe to
1339 // deallocate.
1340 free(const_cast<char *>(old_name));
1341 }
1342 }
1343
1344 void Thread::SetThreadName(const char* name) {
1345 DCHECK(this == Thread::Current() || IsSuspended()); // O.w. `this` may disappear.
1346 SetCachedThreadName(name);
1347 if (!IsStillStarting() || this == Thread::Current()) {
1348 // The RI is documented to do this only in the this == self case, which would avoid the
1349 // IsStillStarting() issue below. We instead use a best effort approach.
1350 ::art::SetThreadName(tlsPtr_.pthread_self /* Not necessarily current thread! */, name);
1351 } // O.w. this will normally be set when we finish starting. We can rarely fail to set the
1352 // pthread name. See TODO in IsStillStarting().
1353 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
1354 }
1355
1356 template <StackType stack_type>
1357 bool Thread::InitStack(uint8_t* read_stack_base, size_t read_stack_size, size_t read_guard_size) {
1358 ScopedTrace trace("InitStack");
1359
1360 SetStackBegin<stack_type>(read_stack_base);
1361 SetStackSize<stack_type>(read_stack_size);
1362
1363 // The minimum stack size we can cope with is the protected region size + stack overflow check
1364 // region size + some memory for normal stack usage.
1365 //
1366 // The protected region is located at the beginning (lowest address) of the stack region.
1367 // Therefore, it starts at a page-aligned address. Its size should be a multiple of page sizes.
1368 // Typically, it is one page in size, though this varies in some configurations.
1369 //
1370 // The overflow reserved bytes is the size of the stack overflow check region, located right
1371 // after the protected region, so it also starts at a page-aligned address. The size is discretionary.
1372 // Typically it is 8K, but this varies in some configurations.
1373 //
1374 // The rest of the stack memory is available for normal stack usage. It is located right after
1375 // the stack overflow check region, so its starting address isn't necessarily page-aligned. The
1376 // size of the region is discretionary, but it should be chosen so that the overall stack
1377 // size is a multiple of the page size. Historically, it is chosen to be at least 4 KB.
1378 //
1379 // On systems with 4K page size, typically the minimum stack size will be 4+8+4 = 16K.
1380 // The thread won't be able to do much with this stack: even the GC takes between 8K and 12K.
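// A rough sketch of the layout described above, from low to high addresses (sizes are
// illustrative only; the exact values depend on the configuration):
//
//   +----------------------+----------------------------+---------------------------+
//   | protected region     | stack overflow check       | normal stack usage        |
//   | (page-aligned, ~4K)  | region (~8K)               | (>= 4K, grows downward)   |
//   +----------------------+----------------------------+---------------------------+
//   ^ read_stack_base (lowest address); the stack grows down from read_stack_base + read_stack_size.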
1381 DCHECK_ALIGNED_PARAM(static_cast<size_t>(GetStackOverflowProtectedSize()),
1382 static_cast<int32_t>(gPageSize));
1383 size_t min_stack = GetStackOverflowProtectedSize() +
1384 RoundUp(GetStackOverflowReservedBytes(kRuntimeQuickCodeISA) + 4 * KB, gPageSize);
1385 if (read_stack_size <= min_stack) {
1386 // Note, as we know the stack is small, avoid operations that could use a lot of stack.
1387 LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
1388 __LINE__,
1389 ::android::base::ERROR,
1390 "Attempt to attach a thread with a too-small stack");
1391 return false;
1392 }
1393
1394 const char* stack_type_str = "";
1395 if constexpr (stack_type == kNativeStackType) {
1396 stack_type_str = "Native";
1397 } else if constexpr (stack_type == kQuickStackType) {
1398 stack_type_str = "Quick";
1399 }
1400
1401 // This is included in the SIGQUIT output, but it's useful here for thread debugging.
1402 VLOG(threads) << StringPrintf("%s stack is at %p (%s with %s guard)",
1403 stack_type_str,
1404 read_stack_base,
1405 PrettySize(read_stack_size).c_str(),
1406 PrettySize(read_guard_size).c_str());
1407
1408 // Set stack_end_ to the bottom of the stack, reserving space for handling stack overflows.
1409
1410 Runtime* runtime = Runtime::Current();
1411 bool implicit_stack_check =
1412 runtime->GetImplicitStackOverflowChecks() && !runtime->IsAotCompiler();
1413
1414 ResetDefaultStackEnd<stack_type>();
1415
1416 // Install the protected region if we are doing implicit overflow checks.
1417 if (implicit_stack_check) {
1418 // The thread might already have a protected region at the bottom. We need
1419 // to install our own region, so we move the limits
1420 // of the stack to make room for it.
1421
1422 SetStackBegin<stack_type>(
1423 GetStackBegin<stack_type>() + read_guard_size + GetStackOverflowProtectedSize());
1424 SetStackEnd<stack_type>(
1425 GetStackEnd<stack_type>() + read_guard_size + GetStackOverflowProtectedSize());
1426 SetStackSize<stack_type>(
1427 GetStackSize<stack_type>() - (read_guard_size + GetStackOverflowProtectedSize()));
1428
1429 InstallImplicitProtection<stack_type>();
1430 }
1431
1432 // Consistency check.
1433 CHECK_GT(FindStackTop<stack_type>(), reinterpret_cast<void*>(GetStackEnd<stack_type>()));
1434
1435 return true;
1436 }
1437
1438 void Thread::ShortDump(std::ostream& os) const {
1439 os << "Thread[";
1440 if (GetThreadId() != 0) {
1441 // If we're in kStarting, we won't have a thin lock id or tid yet.
1442 os << GetThreadId()
1443 << ",tid=" << GetTid() << ',';
1444 }
1445 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1446 const char* name = tlsPtr_.name.load();
1447 os << GetState()
1448 << ",Thread*=" << this
1449 << ",peer=" << tlsPtr_.opeer
1450 << ",\"" << (name == nullptr ? "null" : name) << "\""
1451 << "]";
1452 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1453 }
1454
1455 Thread::DumpOrder Thread::Dump(std::ostream& os,
1456 bool dump_native_stack,
1457 bool force_dump_stack) const {
1458 DumpState(os);
1459 return DumpStack(os, dump_native_stack, force_dump_stack);
1460 }
1461
1462 Thread::DumpOrder Thread::Dump(std::ostream& os,
1463 unwindstack::AndroidLocalUnwinder& unwinder,
1464 bool dump_native_stack,
1465 bool force_dump_stack) const {
1466 DumpState(os);
1467 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
1468 }
1469
1470 ObjPtr<mirror::String> Thread::GetThreadName() const {
1471 if (tlsPtr_.opeer == nullptr) {
1472 return nullptr;
1473 }
1474 ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(tlsPtr_.opeer);
1475 return name == nullptr ? nullptr : name->AsString();
1476 }
1477
1478 void Thread::GetThreadName(std::string& name) const {
1479 tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
1480 // The store part of the increment has to be ordered with respect to the following load.
1481 const char* c_name = tlsPtr_.name.load(std::memory_order_seq_cst);
1482 name.assign(c_name == nullptr ? "<no name>" : c_name);
1483 tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
1484 }
1485
1486 uint64_t Thread::GetCpuMicroTime() const {
1487 #if defined(__linux__)
1488 return Thread::GetCpuNanoTime() / 1000;
1489 #else // __APPLE__
1490 UNIMPLEMENTED(WARNING);
1491 return -1;
1492 #endif
1493 }
1494
1495 uint64_t Thread::GetCpuNanoTime() const {
1496 #if defined(__linux__)
1497 clockid_t cpu_clock_id;
1498 pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
1499 timespec now;
1500 clock_gettime(cpu_clock_id, &now);
1501 return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) +
1502 static_cast<uint64_t>(now.tv_nsec);
1503 #else // __APPLE__
1504 UNIMPLEMENTED(WARNING);
1505 return -1;
1506 #endif
1507 }
1508
1509 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
1510 void Thread::UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
1511 LOG(ERROR) << *thread << " suspend count already zero.";
1512 Locks::thread_suspend_count_lock_->Unlock(self);
1513 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1514 Locks::mutator_lock_->SharedTryLock(self);
1515 if (!Locks::mutator_lock_->IsSharedHeld(self)) {
1516 LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
1517 }
1518 }
1519 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1520 Locks::thread_list_lock_->TryLock(self);
1521 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
1522 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
1523 }
1524 }
1525 std::ostringstream ss;
1526 Runtime::Current()->GetThreadList()->Dump(ss);
1527 LOG(FATAL) << ss.str();
1528 UNREACHABLE();
1529 }
1530
1531 bool Thread::PassActiveSuspendBarriers() {
1532 DCHECK_EQ(this, Thread::Current());
1533 DCHECK_NE(GetState(), ThreadState::kRunnable);
1534 // Grab the suspend_count lock and copy the current set of barriers. Then clear the list and the
1535 // flag. The IncrementSuspendCount function requires the lock so we prevent a race between setting
1536 // the kActiveSuspendBarrier flag and clearing it.
1537 // TODO: Consider doing this without the temporary vector. That code will be a bit
1538 // tricky, since the WrappedSuspend1Barrier may disappear once the barrier is decremented.
1539 std::vector<AtomicInteger*> pass_barriers{};
1540 {
1541 MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1542 if (!ReadFlag(ThreadFlag::kActiveSuspendBarrier, std::memory_order_relaxed)) {
1543 // Quick exit test: The barriers have already been claimed - this is possible as there may
1544 // be a race to claim and it doesn't matter who wins. All of the callers of this function
1545 // (except SuspendAllInternal) will first test the kActiveSuspendBarrier flag without the
1546 // lock. Here we double-check whether the barrier has been passed with the
1547 // suspend_count_lock_.
1548 return false;
1549 }
1550 if (tlsPtr_.active_suspendall_barrier != nullptr) {
1551 // We have at most one active active_suspendall_barrier. See thread.h comment.
1552 pass_barriers.push_back(tlsPtr_.active_suspendall_barrier);
1553 tlsPtr_.active_suspendall_barrier = nullptr;
1554 }
1555 for (WrappedSuspend1Barrier* w = tlsPtr_.active_suspend1_barriers; w != nullptr; w = w->next_) {
1556 CHECK_EQ(w->magic_, WrappedSuspend1Barrier::kMagic)
1557 << "first = " << tlsPtr_.active_suspend1_barriers << " current = " << w
1558 << " next = " << w->next_;
1559 pass_barriers.push_back(&(w->barrier_));
1560 }
1561 tlsPtr_.active_suspend1_barriers = nullptr;
1562 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1563 CHECK_GT(pass_barriers.size(), 0U); // Since kActiveSuspendBarrier was set.
1564 // Decrement suspend barrier(s) while we still hold the lock, since SuspendThread may
1565 // remove and deallocate suspend barriers while holding suspend_count_lock_ .
1566 // There will typically only be a single barrier to pass here.
1567 for (AtomicInteger*& barrier : pass_barriers) {
1568 int32_t old_val = barrier->fetch_sub(1, std::memory_order_release);
1569 CHECK_GT(old_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << old_val;
1570 if (old_val != 1) {
1571 // We're done with it.
1572 barrier = nullptr;
1573 }
1574 }
1575 }
1576 // Finally do futex_wakes after releasing the lock.
1577 for (AtomicInteger* barrier : pass_barriers) {
1578 #if ART_USE_FUTEXES
1579 if (barrier != nullptr) {
1580 futex(barrier->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
1581 }
1582 #endif
1583 }
1584 return true;
1585 }
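// Descriptive note (added for clarity): each barrier above is an AtomicInteger counting the
// threads that still need to pass it. We decrement while holding thread_suspend_count_lock_
// so the barrier's owner cannot deallocate it underneath us, and we issue the futex wake-ups
// only after dropping the lock, and only for barriers whose count our decrement brought to
// zero.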
1586
1587 void Thread::RunCheckpointFunction() {
1588 DCHECK_EQ(Thread::Current(), this);
1589 CHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1590 // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If
1591 // there are no more checkpoints we will also clear the kCheckpointRequest flag.
1592 Closure* checkpoint;
1593 {
1594 MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1595 checkpoint = tlsPtr_.checkpoint_function;
1596 if (!checkpoint_overflow_.empty()) {
1597 // Overflow list not empty, copy the first one out and continue.
1598 tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
1599 checkpoint_overflow_.pop_front();
1600 } else {
1601 // No overflow checkpoints. Clear the kCheckpointRequest flag
1602 tlsPtr_.checkpoint_function = nullptr;
1603 AtomicClearFlag(ThreadFlag::kCheckpointRequest);
1604 }
1605 }
1606 // Outside the lock, run the checkpoint function.
1607 ScopedTrace trace("Run checkpoint function");
1608 CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint";
1609 checkpoint->Run(this);
1610 }
1611
1612 void Thread::RunEmptyCheckpoint() {
1613 // Note: Empty checkpoint does not access the thread's stack,
1614 // so we do not need to check for the flip function.
1615 DCHECK_EQ(Thread::Current(), this);
1616 // See mutator_gc_coord.md and b/382722942 for memory ordering discussion.
1617 AtomicClearFlag(ThreadFlag::kEmptyCheckpointRequest, std::memory_order_release);
1618 Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
1619 }
1620
1621 bool Thread::RequestCheckpoint(Closure* function) {
1622 bool success;
1623 do {
1624 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1625 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1626 return false; // Fail, thread is suspended and so can't run a checkpoint.
1627 }
1628 StateAndFlags new_state_and_flags = old_state_and_flags;
1629 new_state_and_flags.SetFlag(ThreadFlag::kCheckpointRequest);
1630 success = tls32_.state_and_flags.CompareAndSetWeakSequentiallyConsistent(
1631 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1632 } while (!success);
1633 // Succeeded setting checkpoint flag, now insert the actual checkpoint.
1634 if (tlsPtr_.checkpoint_function == nullptr) {
1635 tlsPtr_.checkpoint_function = function;
1636 } else {
1637 checkpoint_overflow_.push_back(function);
1638 }
1639 DCHECK(ReadFlag(ThreadFlag::kCheckpointRequest, std::memory_order_relaxed));
1640 TriggerSuspend();
1641 return true;
1642 }
1643
1644 bool Thread::RequestEmptyCheckpoint() {
1645 StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1646 if (old_state_and_flags.GetState() != ThreadState::kRunnable) {
1647 // If it's not runnable, we don't need to do anything because it won't be in the middle of a
1648 // heap access (e.g. the read barrier).
1649 return false;
1650 }
1651
1652 // We must be runnable to request a checkpoint.
1653 DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
1654 StateAndFlags new_state_and_flags = old_state_and_flags;
1655 new_state_and_flags.SetFlag(ThreadFlag::kEmptyCheckpointRequest);
1656 bool success = tls32_.state_and_flags.CompareAndSetStrongSequentiallyConsistent(
1657 old_state_and_flags.GetValue(), new_state_and_flags.GetValue());
1658 if (success) {
1659 TriggerSuspend();
1660 }
1661 return success;
1662 }
1663
1664 class BarrierClosure : public Closure {
1665 public:
1666 explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
1667
1668 void Run(Thread* self) override {
1669 wrapped_->Run(self);
1670 barrier_.Pass(self);
1671 }
1672
1673 void Wait(Thread* self, ThreadState wait_state) {
1674 if (wait_state != ThreadState::kRunnable) {
1675 barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1);
1676 } else {
1677 barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1);
1678 }
1679 }
1680
1681 private:
1682 Closure* wrapped_;
1683 Barrier barrier_;
1684 };
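// Usage note (descriptive only): RequestSynchronousCheckpoint() below wraps the caller's
// closure in a BarrierClosure, installs it on the target thread via RequestCheckpoint(), and
// then calls Wait() so the requester blocks until the target has actually run the closure and
// passed the barrier.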
1685
1686 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
1687 bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState wait_state) {
1688 Thread* self = Thread::Current();
1689 if (this == self) {
1690 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1691 // Unlock the tll before running so that the state is the same regardless of thread.
1692 Locks::thread_list_lock_->ExclusiveUnlock(self);
1693 // Asked to run on this thread. Just run.
1694 function->Run(this);
1695 return true;
1696 }
1697
1698 // The current thread is not this thread.
1699
1700 VerifyState();
1701
1702 Locks::thread_list_lock_->AssertExclusiveHeld(self);
1703 // If target "this" thread is runnable, try to schedule a checkpoint. Do some gymnastics to not
1704 // hold the suspend-count lock for too long.
1705 if (GetState() == ThreadState::kRunnable) {
1706 BarrierClosure barrier_closure(function);
1707 bool installed = false;
1708 {
1709 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1710 installed = RequestCheckpoint(&barrier_closure);
1711 }
1712 if (installed) {
1713 // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
1714 // reacquire it since we don't know if 'this' hasn't been deleted yet.
1715 Locks::thread_list_lock_->ExclusiveUnlock(self);
1716 ScopedThreadStateChange sts(self, wait_state);
1717 // Wait state can be kRunnable, in which case, for lock ordering purposes, it's as if we ran
1718 // the closure ourselves. This means that the target thread should not acquire a pre-mutator
1719 // lock without running the checkpoint, and the closure should not acquire a pre-mutator
1720 // lock or suspend.
1721 barrier_closure.Wait(self, wait_state);
1722 return true;
1723 }
1724 // No longer runnable. Fall-through.
1725 }
1726
1727 // Target "this" thread was not runnable. Suspend it, hopefully redundantly,
1728 // but it might have become runnable in the meantime.
1729 // Although this is a thread suspension, the target thread only blocks while we run the
1730 // checkpoint, which is presumed to terminate quickly even if other threads are blocked.
1731 // Note: IncrementSuspendCount also expects the thread_list_lock to be held unless this == self.
1732 WrappedSuspend1Barrier wrapped_barrier{};
1733 {
1734 bool is_suspended = false;
1735
1736 {
1737 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1738 // If wait_state is kRunnable, function may not suspend. We thus never block because
1739 // we ourselves are being asked to suspend.
1740 if (UNLIKELY(wait_state != ThreadState::kRunnable && self->GetSuspendCount() != 0)) {
1741 // We are being asked to suspend while we are suspending another thread that may be
1742 // responsible for our suspension. This is likely to result in deadlock if we each
1743 // block on the suspension request. Instead we wait for the situation to change.
1744 ThreadExitFlag target_status;
1745 NotifyOnThreadExit(&target_status);
1746 for (int iter_count = 1; self->GetSuspendCount() != 0; ++iter_count) {
1747 Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1748 Locks::thread_list_lock_->ExclusiveUnlock(self);
1749 {
1750 ScopedThreadStateChange sts(self, wait_state);
1751 usleep(ThreadList::kThreadSuspendSleepUs);
1752 }
1753 CHECK_LT(iter_count, ThreadList::kMaxSuspendRetries);
1754 Locks::thread_list_lock_->ExclusiveLock(self);
1755 if (target_status.HasExited()) {
1756 Locks::thread_list_lock_->ExclusiveUnlock(self);
1757 DCheckUnregisteredEverywhere(&target_status, &target_status);
1758 return false;
1759 }
1760 Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1761 }
1762 UnregisterThreadExitFlag(&target_status);
1763 }
1764 IncrementSuspendCount(self, nullptr, &wrapped_barrier, SuspendReason::kInternal);
1765 VerifyState();
1766 DCHECK_GT(GetSuspendCount(), 0);
1767 if (wait_state != ThreadState::kRunnable) {
1768 DCHECK_EQ(self->GetSuspendCount(), 0);
1769 }
1770 // Since we've incremented the suspend count, "this" thread can no longer disappear.
1771 Locks::thread_list_lock_->ExclusiveUnlock(self);
1772 if (IsSuspended()) {
1773 // See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
1774 RemoveFirstSuspend1Barrier(&wrapped_barrier);
1775 if (!HasActiveSuspendBarrier()) {
1776 AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1777 }
1778 is_suspended = true;
1779 }
1780 }
1781 if (!is_suspended) {
1782 // This waits while holding the mutator lock. Effectively `self` becomes
1783 // impossible to suspend until `this` responds to the suspend request.
1784 // Arguably that's not making anything qualitatively worse.
1785 bool success = !Runtime::Current()
1786 ->GetThreadList()
1787 ->WaitForSuspendBarrier(&wrapped_barrier.barrier_)
1788 .has_value();
1789 CHECK(success);
1790 }
1791
1792 // Ensure that the flip function for this thread, if pending, is finished *before*
1793 // the checkpoint function is run. Otherwise, we may end up with both 'to' and 'from'
1794 // space references on the stack, confusing the GC's thread-flip logic. The caller is
1795 // runnable so can't have a pending flip function.
1796 DCHECK_EQ(self->GetState(), ThreadState::kRunnable);
1797 DCHECK(IsSuspended());
1798 DCHECK(!self->GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1799 EnsureFlipFunctionStarted(self, this);
1800 // Since we're runnable, and kPendingFlipFunction is set with all threads suspended, it
1801 // cannot be set again here. Thus kRunningFlipFunction is either already set after the
1802 // EnsureFlipFunctionStarted call, or will not be set before we call Run().
1803 // See mutator_gc_coord.md for a discussion of memory ordering for thread flags.
1804 if (ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire)) {
1805 WaitForFlipFunction(self);
1806 }
1807 function->Run(this);
1808 }
1809
1810 {
1811 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1812 DCHECK_NE(GetState(), ThreadState::kRunnable);
1813 DCHECK_GT(GetSuspendCount(), 0);
1814 DecrementSuspendCount(self);
1815 if (kIsDebugBuild) {
1816 CheckBarrierInactive(&wrapped_barrier);
1817 }
1818 resume_cond_->Broadcast(self);
1819 }
1820
1821 Locks::thread_list_lock_->AssertNotHeld(self);
1822 return true;
1823 }
1824
1825 void Thread::SetFlipFunction(Closure* function) {
1826 // This is called with all threads suspended, except for the calling thread.
1827 DCHECK(IsSuspended() || Thread::Current() == this);
1828 DCHECK(function != nullptr);
1829 DCHECK(GetFlipFunction() == nullptr);
1830 tlsPtr_.flip_function.store(function, std::memory_order_relaxed);
1831 DCHECK(!GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
1832 AtomicSetFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_release);
1833 }
1834
1835 bool Thread::EnsureFlipFunctionStarted(Thread* self,
1836 Thread* target,
1837 StateAndFlags old_state_and_flags,
1838 ThreadExitFlag* tef,
1839 bool* finished) {
1840 // Note: If tef is non-null, *target may have been destroyed. We have to be careful about
1841 // accessing it. That is the reason this is static and not a member function.
1842 DCHECK(self == Current());
1843 bool check_exited = (tef != nullptr);
1844 // Check that the thread can't unexpectedly exit while we are running.
1845 DCHECK(self == target || check_exited ||
1846 target->ReadFlag(ThreadFlag::kSuspendRequest, std::memory_order_relaxed) ||
1847 Locks::thread_list_lock_->IsExclusiveHeld(self))
1848 << *target;
1849 bool become_runnable;
1850 auto maybe_release = [=]() NO_THREAD_SAFETY_ANALYSIS /* conditionally unlocks */ {
1851 if (check_exited) {
1852 Locks::thread_list_lock_->Unlock(self);
1853 }
1854 };
1855 auto set_finished = [=](bool value) {
1856 if (finished != nullptr) {
1857 *finished = value;
1858 }
1859 };
1860
1861 if (check_exited) {
1862 Locks::thread_list_lock_->Lock(self);
1863 if (tef->HasExited()) {
1864 Locks::thread_list_lock_->Unlock(self);
1865 set_finished(true);
1866 return false;
1867 }
1868 }
1869 target->VerifyState();
1870 if (old_state_and_flags.GetValue() == 0) {
1871 become_runnable = false;
1872 // Memory_order_relaxed is OK here, since we re-check with memory_order_acquire below before
1873 // acting on a pending flip function.
1874 old_state_and_flags = target->GetStateAndFlags(std::memory_order_relaxed);
1875 } else {
1876 become_runnable = true;
1877 DCHECK(!check_exited);
1878 DCHECK(target == self);
1879 DCHECK(old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
1880 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest));
1881 }
1882 while (true) {
1883 DCHECK(!check_exited || (Locks::thread_list_lock_->IsExclusiveHeld(self) && !tef->HasExited()));
1884 if (!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) {
1885 // Re-read kRunningFlipFunction flag with acquire ordering to ensure that if we claim
1886 // flip function has run then its execution happened-before our return.
1887 bool running_flip =
1888 target->ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire);
1889 maybe_release();
1890 set_finished(!running_flip);
1891 return false;
1892 }
1893 DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction));
1894 StateAndFlags new_state_and_flags =
1895 old_state_and_flags.WithFlag(ThreadFlag::kRunningFlipFunction)
1896 .WithoutFlag(ThreadFlag::kPendingFlipFunction);
1897 if (become_runnable) {
1898 DCHECK_EQ(self, target);
1899 DCHECK_NE(self->GetState(), ThreadState::kRunnable);
1900 new_state_and_flags = new_state_and_flags.WithState(ThreadState::kRunnable);
1901 }
1902 if (target->tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
1903 new_state_and_flags.GetValue())) {
1904 if (become_runnable) {
1905 self->GetMutatorLock()->TransitionFromSuspendedToRunnable(self);
1906 }
1907 art::Locks::mutator_lock_->AssertSharedHeld(self);
1908 maybe_release();
1909 // Thread will not go away while kRunningFlipFunction is set.
1910 target->RunFlipFunction(self);
1911 // At this point, no flip function flags should be set. It's unsafe to DCHECK that, since
1912 // the thread may now have exited.
1913 set_finished(true);
1914 return become_runnable;
1915 }
1916 if (become_runnable) {
1917 DCHECK(!check_exited); // We didn't acquire thread_list_lock_ .
1918 // Let caller retry.
1919 return false;
1920 }
1921 // Again, we re-read with memory_order_acquire before acting on the flags.
1922 old_state_and_flags = target->GetStateAndFlags(std::memory_order_relaxed);
1923 }
1924 // Unreachable.
1925 }
1926
1927 void Thread::RunFlipFunction(Thread* self) {
1928 // This function is called either by the thread running `ThreadList::FlipThreadRoots()` or when
1929 // a thread becomes runnable, after we've successfully set the kRunningFlipFunction ThreadFlag.
1930 DCHECK(ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_relaxed));
1931
1932 Closure* flip_function = GetFlipFunction();
1933 tlsPtr_.flip_function.store(nullptr, std::memory_order_relaxed);
1934 DCHECK(flip_function != nullptr);
1935 VerifyState();
1936 flip_function->Run(this);
1937 DCHECK(!ReadFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_relaxed));
1938 VerifyState();
1939 AtomicClearFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_release);
1940 // From here on this thread may go away, and it is no longer safe to access.
1941
1942 // Notify all threads that are waiting for completion.
1943 // TODO: Should we create a separate mutex and condition variable instead
1944 // of piggy-backing on the `thread_suspend_count_lock_` and `resume_cond_`?
1945 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1946 resume_cond_->Broadcast(self);
1947 }
1948
1949 void Thread::WaitForFlipFunction(Thread* self) const {
1950 // Another thread is running the flip function. Wait for it to complete.
1951 // Check the flag while holding the mutex so that we do not miss the broadcast.
1952 // Repeat the check after waiting to guard against spurious wakeups (and because
1953 // we share the `thread_suspend_count_lock_` and `resume_cond_` with other code).
1954 // Check that the thread can't unexpectedly exit while we are running.
1955 DCHECK(self == this || ReadFlag(ThreadFlag::kSuspendRequest, std::memory_order_relaxed) ||
1956 Locks::thread_list_lock_->IsExclusiveHeld(self));
1957 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1958 while (true) {
1959 // See mutator_gc_coord.md for a discussion of memory ordering for thread flags.
1960 if (!ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire)) {
1961 return;
1962 }
1963 // We sometimes hold mutator lock here. OK since the flip function must complete quickly.
1964 resume_cond_->WaitHoldingLocks(self);
1965 }
1966 }
1967
1968 void Thread::WaitForFlipFunctionTestingExited(Thread* self, ThreadExitFlag* tef) {
1969 Locks::thread_list_lock_->Lock(self);
1970 if (tef->HasExited()) {
1971 Locks::thread_list_lock_->Unlock(self);
1972 return;
1973 }
1974 // We need to hold suspend_count_lock_ to avoid missed wakeups when the flip function finishes.
1975 // We need to hold thread_list_lock_ because the tef test result is only valid while we hold the
1976 // lock, and once kRunningFlipFunction is no longer set, "this" may be deallocated. Hence the
1977 // complicated locking dance.
1978 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1979 while (true) {
1980 // See mutator_gc_coord.md for a discussion of memory ordering for thread flags.
1981 bool running_flip = ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire);
1982 Locks::thread_list_lock_->Unlock(self); // So we can wait or return.
1983 if (!running_flip) {
1984 return;
1985 }
1986 resume_cond_->WaitHoldingLocks(self);
1987 Locks::thread_suspend_count_lock_->Unlock(self); // To re-lock thread_list_lock.
1988 Locks::thread_list_lock_->Lock(self);
1989 Locks::thread_suspend_count_lock_->Lock(self);
1990 if (tef->HasExited()) {
1991 Locks::thread_list_lock_->Unlock(self);
1992 return;
1993 }
1994 }
1995 }
1996
1997 void Thread::FullSuspendCheck(bool implicit) {
1998 ScopedTrace trace(__FUNCTION__);
1999 DCHECK(!ReadFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed));
2000 DCHECK(this == Thread::Current());
2001 VLOG(threads) << this << " self-suspending";
2002 // Make thread appear suspended to other threads, release mutator_lock_.
2003 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
2004 ScopedThreadSuspension(this, ThreadState::kSuspended); // NOLINT
2005 if (implicit) {
2006 // For implicit suspend check we want to `madvise()` away
2007 // the alternate signal stack to avoid wasting memory.
2008 MadviseAwayAlternateSignalStack();
2009 }
2010 VLOG(threads) << this << " self-reviving";
2011 }
2012
2013 static std::string GetSchedulerGroupName(pid_t tid) {
2014 // /proc/<pid>/cgroup looks like this:
2015 // 2:devices:/
2016 // 1:cpuacct,cpu:/
2017 // We want the third field from the line whose second field contains the "cpu" token.
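// For example, given the hypothetical line "3:cpuacct,cpu:/bg_non_interactive", the second
// field contains the "cpu" token, so we would return "bg_non_interactive" (the third field
// with its leading slash stripped).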
2018 std::string cgroup_file;
2019 if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
2020 &cgroup_file)) {
2021 return "";
2022 }
2023 std::vector<std::string> cgroup_lines;
2024 Split(cgroup_file, '\n', &cgroup_lines);
2025 for (size_t i = 0; i < cgroup_lines.size(); ++i) {
2026 std::vector<std::string> cgroup_fields;
2027 Split(cgroup_lines[i], ':', &cgroup_fields);
2028 std::vector<std::string> cgroups;
2029 Split(cgroup_fields[1], ',', &cgroups);
2030 for (size_t j = 0; j < cgroups.size(); ++j) {
2031 if (cgroups[j] == "cpu") {
2032 return cgroup_fields[2].substr(1); // Skip the leading slash.
2033 }
2034 }
2035 }
2036 return "";
2037 }
2038
2039 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
2040 std::string group_name;
2041 int priority;
2042 bool is_daemon = false;
2043 Thread* self = Thread::Current();
2044
2045 // Don't do this if we are aborting since the GC may have all the threads suspended. This will
2046 // cause ScopedObjectAccessUnchecked to deadlock.
2047 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
2048 ScopedObjectAccessUnchecked soa(self);
2049 priority = WellKnownClasses::java_lang_Thread_priority->GetInt(thread->tlsPtr_.opeer);
2050 is_daemon = WellKnownClasses::java_lang_Thread_daemon->GetBoolean(thread->tlsPtr_.opeer);
2051
2052 ObjPtr<mirror::Object> thread_group =
2053 WellKnownClasses::java_lang_Thread_group->GetObject(thread->tlsPtr_.opeer);
2054
2055 if (thread_group != nullptr) {
2056 ObjPtr<mirror::Object> group_name_object =
2057 WellKnownClasses::java_lang_ThreadGroup_name->GetObject(thread_group);
2058 group_name = (group_name_object != nullptr)
2059 ? group_name_object->AsString()->ToModifiedUtf8()
2060 : "<null>";
2061 }
2062 } else if (thread != nullptr) {
2063 priority = thread->GetNativePriority();
2064 } else {
2065 palette_status_t status = PaletteSchedGetPriority(tid, &priority);
2066 CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
2067 }
2068
2069 std::string scheduler_group_name(GetSchedulerGroupName(tid));
2070 if (scheduler_group_name.empty()) {
2071 scheduler_group_name = "default";
2072 }
2073
2074 if (thread != nullptr) {
2075 thread->tls32_.num_name_readers.fetch_add(1, std::memory_order_seq_cst);
2076 os << '"' << thread->tlsPtr_.name.load() << '"';
2077 thread->tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
2078 if (is_daemon) {
2079 os << " daemon";
2080 }
2081 os << " prio=" << priority
2082 << " tid=" << thread->GetThreadId()
2083 << " " << thread->GetState();
2084 if (thread->IsStillStarting()) {
2085 os << " (still starting up)";
2086 }
2087 if (thread->tls32_.disable_thread_flip_count != 0) {
2088 os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
2089 }
2090 os << "\n";
2091 } else {
2092 os << '"' << ::art::GetThreadName(tid) << '"'
2093 << " prio=" << priority
2094 << " (not attached)\n";
2095 }
2096
2097 if (thread != nullptr) {
2098 auto suspend_log_fn = [&]() REQUIRES(Locks::thread_suspend_count_lock_) {
2099 StateAndFlags state_and_flags = thread->GetStateAndFlags(std::memory_order_relaxed);
2100 static_assert(
2101 static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
2102 state_and_flags.SetState(ThreadState::kRunnable); // Clear state bits.
2103 os << " | group=\"" << group_name << "\""
2104 << " sCount=" << thread->tls32_.suspend_count
2105 << " ucsCount=" << thread->tls32_.user_code_suspend_count
2106 << " flags=" << state_and_flags.GetValue()
2107 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
2108 << " self=" << reinterpret_cast<const void*>(thread) << "\n";
2109 };
2110 if (Locks::thread_suspend_count_lock_->IsExclusiveHeld(self)) {
2111 Locks::thread_suspend_count_lock_->AssertExclusiveHeld(self); // For annotalysis.
2112 suspend_log_fn();
2113 } else {
2114 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
2115 suspend_log_fn();
2116 }
2117 }
2118
2119 os << " | sysTid=" << tid
2120 << " nice=" << getpriority(PRIO_PROCESS, static_cast<id_t>(tid))
2121 << " cgrp=" << scheduler_group_name;
2122 if (thread != nullptr) {
2123 int policy;
2124 sched_param sp;
2125 #if !defined(__APPLE__)
2126 // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
2127 policy = sched_getscheduler(tid);
2128 if (policy == -1) {
2129 PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
2130 }
2131 int sched_getparam_result = sched_getparam(tid, &sp);
2132 if (sched_getparam_result == -1) {
2133 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
2134 sp.sched_priority = -1;
2135 }
2136 #else
2137 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
2138 __FUNCTION__);
2139 #endif
2140 os << " sched=" << policy << "/" << sp.sched_priority
2141 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
2142 }
2143 os << "\n";
2144
2145 // Grab the scheduler stats for this thread.
2146 std::string scheduler_stats;
2147 if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid),
2148 &scheduler_stats)
2149 && !scheduler_stats.empty()) {
2150 scheduler_stats = android::base::Trim(scheduler_stats); // Lose the trailing '\n'.
2151 } else {
2152 scheduler_stats = "0 0 0";
2153 }
2154
2155 char native_thread_state = '?';
2156 int utime = 0;
2157 int stime = 0;
2158 int task_cpu = 0;
2159 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
2160
2161 os << " | state=" << native_thread_state
2162 << " schedstat=( " << scheduler_stats << " )"
2163 << " utm=" << utime
2164 << " stm=" << stime
2165 << " core=" << task_cpu
2166 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
2167 if (thread != nullptr) {
2168 // TODO(Simulator): Also dump the simulated stack if one exists.
2169 os << " | stack=" << reinterpret_cast<void*>(thread->GetStackBegin<kNativeStackType>())
2170 << "-" << reinterpret_cast<void*>(thread->GetStackEnd<kNativeStackType>())
2171 << " stackSize=" << PrettySize(thread->GetStackSize<kNativeStackType>()) << "\n";
2172 // Dump the held mutexes.
2173 os << " | held mutexes=";
2174 for (size_t i = 0; i < kLockLevelCount; ++i) {
2175 if (i != kMonitorLock) {
2176 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
2177 if (mutex != nullptr) {
2178 os << " \"" << mutex->GetName() << "\"";
2179 if (mutex->IsReaderWriterMutex()) {
2180 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
2181 if (rw_mutex->GetExclusiveOwnerTid() == tid) {
2182 os << "(exclusive held)";
2183 } else {
2184 os << "(shared held)";
2185 }
2186 }
2187 }
2188 }
2189 }
2190 os << "\n";
2191 }
2192 }
2193
2194 void Thread::DumpState(std::ostream& os) const {
2195 Thread::DumpState(os, this, GetTid());
2196 }
2197
2198 struct StackDumpVisitor : public MonitorObjectsStackVisitor {
2199 StackDumpVisitor(std::ostream& os_in,
2200 Thread* thread_in,
2201 Context* context,
2202 bool can_allocate,
2203 bool check_suspended = true,
2204 bool dump_locks = true)
2205 REQUIRES_SHARED(Locks::mutator_lock_)
2206 : MonitorObjectsStackVisitor(thread_in,
2207 context,
2208 check_suspended,
2209 can_allocate && dump_locks),
2210 os(os_in),
2211 last_method(nullptr),
2212 last_line_number(0),
2213 repetition_count(0) {}
2214
2215 virtual ~StackDumpVisitor() {
2216 if (frame_count == 0) {
2217 os << " (no managed stack frames)\n";
2218 }
2219 }
2220
2221 static constexpr size_t kMaxRepetition = 3u;
2222
2223 VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
2224 REQUIRES_SHARED(Locks::mutator_lock_) {
2225 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
2226 ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache();
2227 int line_number = -1;
2228 uint32_t dex_pc = GetDexPc(false);
2229 if (dex_cache != nullptr) { // be tolerant of bad input
2230 const DexFile* dex_file = dex_cache->GetDexFile();
2231 line_number = annotations::GetLineNumFromPC(dex_file, m, dex_pc);
2232 }
2233 if (line_number == last_line_number && last_method == m) {
2234 ++repetition_count;
2235 } else {
2236 if (repetition_count >= kMaxRepetition) {
2237 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
2238 }
2239 repetition_count = 0;
2240 last_line_number = line_number;
2241 last_method = m;
2242 }
2243
2244 if (repetition_count >= kMaxRepetition) {
2245 // Skip visiting/printing anything.
2246 return VisitMethodResult::kSkipMethod;
2247 }
2248
2249 os << " at " << m->PrettyMethod(false);
2250 if (m->IsNative()) {
2251 os << "(Native method)";
2252 } else {
2253 const char* source_file(m->GetDeclaringClassSourceFile());
2254 if (line_number == -1) {
2255 // If we failed to map to a line number, use
2256 // the dex pc as the line number and leave source file null
2257 source_file = nullptr;
2258 line_number = static_cast<int32_t>(dex_pc);
2259 }
2260 os << "(" << (source_file != nullptr ? source_file : "unavailable")
2261 << ":" << line_number << ")";
2262 }
2263 os << "\n";
2264 // Go and visit locks.
2265 return VisitMethodResult::kContinueMethod;
2266 }
2267
2268 VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
2269 return VisitMethodResult::kContinueMethod;
2270 }
2271
2272 void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
2273 REQUIRES_SHARED(Locks::mutator_lock_) {
2274 PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
2275 }
2276 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
2277 override
2278 REQUIRES_SHARED(Locks::mutator_lock_) {
2279 PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
2280 }
2281 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
2282 ThreadState state,
2283 uint32_t owner_tid)
2284 override
2285 REQUIRES_SHARED(Locks::mutator_lock_) {
2286 const char* msg;
2287 switch (state) {
2288 case ThreadState::kBlocked:
2289 msg = " - waiting to lock ";
2290 break;
2291
2292 case ThreadState::kWaitingForLockInflation:
2293 msg = " - waiting for lock inflation of ";
2294 break;
2295
2296 default:
2297 LOG(FATAL) << "Unreachable";
2298 UNREACHABLE();
2299 }
2300 PrintObject(obj, msg, owner_tid);
2301 num_blocked++;
2302 }
2303 void VisitLockedObject(ObjPtr<mirror::Object> obj)
2304 override
2305 REQUIRES_SHARED(Locks::mutator_lock_) {
2306 PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
2307 num_locked++;
2308 }
2309
2310 void PrintObject(ObjPtr<mirror::Object> obj,
2311 const char* msg,
2312 uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) {
2313 if (obj == nullptr) {
2314 os << msg << "an unknown object";
2315 } else {
2316 const std::string pretty_type(obj->PrettyTypeOf());
2317 // It's often unsafe to allow lock inflation here. We may be the only runnable thread, or
2318 // this may be called from a checkpoint. We get the hashcode on a best effort basis.
2319 static constexpr int kNumRetries = 3;
2320 static constexpr int kSleepMicros = 10;
2321 int32_t hash_code;
2322 for (int i = 0;; ++i) {
2323 hash_code = obj->IdentityHashCodeNoInflation();
2324 if (hash_code != 0 || i == kNumRetries) {
2325 break;
2326 }
2327 usleep(kSleepMicros);
2328 }
2329 if (hash_code == 0) {
2330 os << msg
2331 << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
2332 reinterpret_cast<intptr_t>(obj.Ptr()),
2333 pretty_type.c_str());
2334 } else {
2335 // - waiting on <0x608c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
2336 os << msg << StringPrintf("<0x%08x> (a %s)", hash_code, pretty_type.c_str());
2337 }
2338 }
2339 if (owner_tid != ThreadList::kInvalidThreadId) {
2340 os << " held by thread " << owner_tid;
2341 }
2342 os << "\n";
2343 }
2344
2345 std::ostream& os;
2346 ArtMethod* last_method;
2347 int last_line_number;
2348 size_t repetition_count;
2349 size_t num_blocked = 0;
2350 size_t num_locked = 0;
2351 };
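// Illustrative example of the per-frame output this visitor produces (method names, line
// numbers and hash codes below are hypothetical):
//   at java.lang.Object.wait(Native method)
//   - waiting on <0x0a1b2c3d> (a java.lang.Object)
//   at com.example.Worker.run(Worker.java:42)
//   - locked <0x0d4e5f60> (a java.lang.Object)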
2352
2353 static bool ShouldShowNativeStack(const Thread* thread)
2354 REQUIRES_SHARED(Locks::mutator_lock_) {
2355 ThreadState state = thread->GetState();
2356
2357 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
2358 if (state > ThreadState::kWaiting && state < ThreadState::kStarting) {
2359 return true;
2360 }
2361
2362 // In an Object.wait variant or Thread.sleep? That's not interesting.
2363 if (state == ThreadState::kTimedWaiting ||
2364 state == ThreadState::kSleeping ||
2365 state == ThreadState::kWaiting) {
2366 return false;
2367 }
2368
2369 // Threads with no managed stack frames should be shown.
2370 if (!thread->HasManagedStack()) {
2371 return true;
2372 }
2373
2374 // In some other native method? That's interesting.
2375 // We don't just check kNative because native methods will be in state kSuspended if they're
2376 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
2377 // thread-startup states if it's early enough in their life cycle (http://b/7432159).
2378 ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
2379 return current_method != nullptr && current_method->IsNative();
2380 }
2381
2382 Thread::DumpOrder Thread::DumpJavaStack(std::ostream& os,
2383 bool check_suspended,
2384 bool dump_locks) const {
2385 // Dumping the Java stack involves the verifier for locks. The verifier operates under the
2386 // assumption that there is no exception pending on entry. Thus, stash any pending exception.
2387 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
2388 // thread.
2389 ScopedExceptionStorage ses(Thread::Current());
2390
2391 std::unique_ptr<Context> context(Context::Create());
2392 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
2393 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
2394 dumper.WalkStack();
2395 if (IsJitSensitiveThread()) {
2396 return DumpOrder::kMain;
2397 } else if (dumper.num_blocked > 0) {
2398 return DumpOrder::kBlocked;
2399 } else if (dumper.num_locked > 0) {
2400 return DumpOrder::kLocked;
2401 } else {
2402 return DumpOrder::kDefault;
2403 }
2404 }
2405
2406 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2407 bool dump_native_stack,
2408 bool force_dump_stack) const {
2409 unwindstack::AndroidLocalUnwinder unwinder;
2410 return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
2411 }
2412
2413 Thread::DumpOrder Thread::DumpStack(std::ostream& os,
2414 unwindstack::AndroidLocalUnwinder& unwinder,
2415 bool dump_native_stack,
2416 bool force_dump_stack) const {
2417 // TODO: we call this code when dying but may not have suspended the thread ourselves. The
2418 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit
2419 // the race with the thread_suspend_count_lock_).
2420 bool dump_for_abort = (gAborting > 0);
2421 bool safe_to_dump = (this == Thread::Current() || IsSuspended());
2422 if (!kIsDebugBuild) {
2423 // We always want to dump the stack for an abort, however, there is no point dumping another
2424 // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
2425 safe_to_dump = (safe_to_dump || dump_for_abort);
2426 }
2427 DumpOrder dump_order = DumpOrder::kDefault;
2428 if (safe_to_dump || force_dump_stack) {
2429 uint64_t nanotime = NanoTime();
2430 // If we're currently in native code, dump that stack before dumping the managed stack.
2431 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
2432 ArtMethod* method =
2433 GetCurrentMethod(nullptr,
2434 /*check_suspended=*/ !force_dump_stack,
2435 /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
2436 DumpNativeStack(os, unwinder, GetTid(), " native: ", method);
2437 }
2438 dump_order = DumpJavaStack(os,
2439 /*check_suspended=*/ !force_dump_stack,
2440 /*dump_locks=*/ !force_dump_stack);
2441 Runtime* runtime = Runtime::Current();
2442 std::optional<uint64_t> start = runtime != nullptr ? runtime->SigQuitNanoTime() : std::nullopt;
2443 if (start.has_value()) {
2444 os << "DumpLatencyMs: " << static_cast<float>(nanotime - start.value()) / 1000000.0 << "\n";
2445 }
2446 } else {
2447 os << "Not able to dump stack of thread that isn't suspended";
2448 }
2449 return dump_order;
2450 }
2451
2452 void Thread::ThreadExitCallback(void* arg) {
2453 Thread* self = reinterpret_cast<Thread*>(arg);
2454 if (self->tls32_.thread_exit_check_count == 0) {
2455 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
2456 "going to use a pthread_key_create destructor?): " << *self;
2457 CHECK(is_started_);
2458 #ifdef __BIONIC__
2459 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
2460 #else
2461 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
2462 Thread::self_tls_ = self;
2463 #endif
2464 self->tls32_.thread_exit_check_count = 1;
2465 } else {
2466 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
2467 }
2468 }
2469
2470 void Thread::Startup() {
2471 CHECK(!is_started_);
2472 is_started_ = true;
2473 {
2474 // MutexLock to keep annotalysis happy.
2475 //
2476 // Note we use null for the thread because Thread::Current can
2477 // return garbage since (is_started_ == true) and
2478 // Thread::pthread_key_self_ is not yet initialized.
2479 // This was seen on glibc.
2480 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
2481 resume_cond_ = new ConditionVariable("Thread resumption condition variable",
2482 *Locks::thread_suspend_count_lock_);
2483 }
2484
2485 // Allocate a TLS slot.
2486 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
2487 "self key");
2488
2489 // Double-check the TLS slot allocation.
2490 if (pthread_getspecific(pthread_key_self_) != nullptr) {
2491 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
2492 }
2493 #ifndef __BIONIC__
2494 CHECK(Thread::self_tls_ == nullptr);
2495 #endif
2496 }
2497
2498 void Thread::FinishStartup() {
2499 Runtime* runtime = Runtime::Current();
2500 CHECK(runtime->IsStarted());
2501
2502 // Finish attaching the main thread.
2503 ScopedObjectAccess soa(Thread::Current());
2504 soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup());
2505 soa.Self()->AssertNoPendingException();
2506
2507 runtime->RunRootClinits(soa.Self());
2508
2509 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
2510 // threads, this is done in Thread.start() on the Java side.
2511 soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
2512 soa.Self()->AssertNoPendingException();
2513 }
2514
2515 void Thread::Shutdown() {
2516 CHECK(is_started_);
2517 is_started_ = false;
2518 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
2519 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
2520 if (resume_cond_ != nullptr) {
2521 delete resume_cond_;
2522 resume_cond_ = nullptr;
2523 }
2524 }
2525
2526 void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) {
2527 ObjPtr<mirror::Object> thread_object = soa.Self()->GetPeer();
2528 ObjPtr<mirror::Object> thread_group_object = soa.Decode<mirror::Object>(thread_group);
2529 if (thread_group == nullptr || kIsDebugBuild) {
2530 // There is always a group set. Retrieve it.
2531 thread_group_object = WellKnownClasses::java_lang_Thread_group->GetObject(thread_object);
2532 if (kIsDebugBuild && thread_group != nullptr) {
2533 CHECK(thread_group_object == soa.Decode<mirror::Object>(thread_group));
2534 }
2535 }
2536 WellKnownClasses::java_lang_ThreadGroup_add->InvokeVirtual<'V', 'L'>(
2537 soa.Self(), thread_group_object, thread_object);
2538 }
2539
2540 void Thread::SignalExitFlags() {
2541 ThreadExitFlag* next;
2542 for (ThreadExitFlag* tef = tlsPtr_.thread_exit_flags; tef != nullptr; tef = next) {
2543 DCHECK(!tef->exited_);
2544 tef->exited_ = true;
2545 next = tef->next_;
2546 if (kIsDebugBuild) {
2547 ThreadExitFlag* const garbage_tef = reinterpret_cast<ThreadExitFlag*>(1);
2548 // Link fields should no longer be used.
2549 tef->prev_ = tef->next_ = garbage_tef;
2550 }
2551 }
2552 tlsPtr_.thread_exit_flags = nullptr; // Now unused.
2553 }
2554
2555 Thread::Thread(bool daemon)
2556 : tls32_(daemon),
2557 wait_monitor_(nullptr),
2558 is_runtime_thread_(false) {
2559 wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
2560 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
2561 tlsPtr_.mutator_lock = Locks::mutator_lock_;
2562 DCHECK(tlsPtr_.mutator_lock != nullptr);
2563 tlsPtr_.name.store(kThreadNameDuringStartup, std::memory_order_relaxed);
2564 CHECK_NE(GetStackOverflowProtectedSize(), 0u);
2565
2566 static_assert((sizeof(Thread) % 4) == 0U,
2567 "art::Thread has a size which is not a multiple of 4.");
2568 DCHECK_EQ(GetStateAndFlags(std::memory_order_relaxed).GetValue(), 0u);
2569 StateAndFlags state_and_flags = StateAndFlags(0u).WithState(ThreadState::kNative);
2570 tls32_.state_and_flags.store(state_and_flags.GetValue(), std::memory_order_relaxed);
2571 tls32_.interrupted.store(false, std::memory_order_relaxed);
2572 // Initialize with no permit; if the java Thread was unparked before being
2573 // started, it will unpark itself before calling into java code.
2574 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
2575 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
2576 std::fill(tlsPtr_.rosalloc_runs,
2577 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
2578 gc::allocator::RosAlloc::GetDedicatedFullRun());
2579 tlsPtr_.checkpoint_function = nullptr;
2580 tlsPtr_.active_suspendall_barrier = nullptr;
2581 tlsPtr_.active_suspend1_barriers = nullptr;
2582 tlsPtr_.flip_function.store(nullptr, std::memory_order_relaxed);
2583 tlsPtr_.thread_local_mark_stack = nullptr;
2584 ResetTlab();
2585 }
2586
2587 bool Thread::CanLoadClasses() const {
2588 return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
2589 }
2590
2591 bool Thread::IsStillStarting() const {
2592 // You might think you can check whether the state is kStarting, but for much of thread startup,
2593 // the thread is in kNative; it might also be in kVmWait.
2594 // You might think you can check whether the peer is null, but the peer is actually created and
2595 // assigned fairly early on, and needs to be.
2596 // It turns out that the last thing to change is the thread name; that's a good proxy for "has
2597 // this thread _ever_ entered kRunnable".
2598 // TODO: I believe that SetThreadName(), ThreadGroup::GetThreads() and many jvmti functions can
2599 // call this while the thread is in the process of starting. Thus we appear to have data races
2600 // here on opeer and jpeer, and our result may be obsolete by the time we return. Aside from the
2601 // data races, it is not immediately clear whether clients are robust against this behavior. It
2602 // may make sense to acquire a per-thread lock during the transition, and have this function
2603 // REQUIRE that. `runtime_shutdown_lock_` might almost work, but is global and currently not
2604 // held long enough.
2605 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
2606 (tlsPtr_.name.load() == kThreadNameDuringStartup);
2607 }
2608
2609 void Thread::AssertPendingException() const {
2610 CHECK(IsExceptionPending()) << "Pending exception expected.";
2611 }
2612
2613 void Thread::AssertPendingOOMException() const {
2614 AssertPendingException();
2615 auto* e = GetException();
2616 CHECK_EQ(e->GetClass(), WellKnownClasses::java_lang_OutOfMemoryError.Get()) << e->Dump();
2617 }
2618
2619 void Thread::AssertNoPendingException() const {
2620 if (UNLIKELY(IsExceptionPending())) {
2621 ScopedObjectAccess soa(Thread::Current());
2622 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump();
2623 }
2624 }
2625
2626 void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
2627 if (UNLIKELY(IsExceptionPending())) {
2628 ScopedObjectAccess soa(Thread::Current());
2629 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
2630 << GetException()->Dump();
2631 }
2632 }
2633
2634 class MonitorExitVisitor : public SingleRootVisitor {
2635 public:
2636 explicit MonitorExitVisitor(Thread* self) : self_(self) { }
2637
2638 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
2639 void VisitRoot(mirror::Object* entered_monitor,
2640 [[maybe_unused]] const RootInfo& info) override NO_THREAD_SAFETY_ANALYSIS {
2641 if (self_->HoldsLock(entered_monitor)) {
2642 LOG(WARNING) << "Calling MonitorExit on object "
2643 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
2644 << " left locked by native thread "
2645 << *Thread::Current() << " which is detaching";
2646 entered_monitor->MonitorExit(self_);
2647 }
2648 }
2649
2650 private:
2651 Thread* const self_;
2652 };
2653
2654 void Thread::Destroy(bool should_run_callbacks) {
2655 Thread* self = this;
2656 DCHECK_EQ(self, Thread::Current());
2657
2658 if (tlsPtr_.jni_env != nullptr) {
2659 {
2660 ScopedObjectAccess soa(self);
2661 MonitorExitVisitor visitor(self);
2662 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
2663 tlsPtr_.jni_env->monitors_.VisitRoots(&visitor, RootInfo(kRootVMInternal));
2664 }
2665 // Release locally held global references which releasing may require the mutator lock.
2666 if (tlsPtr_.jpeer != nullptr) {
2667 // If pthread_create fails we don't have a jni env here.
2668 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
2669 tlsPtr_.jpeer = nullptr;
2670 }
2671 if (tlsPtr_.class_loader_override != nullptr) {
2672 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
2673 tlsPtr_.class_loader_override = nullptr;
2674 }
2675 }
2676
2677 if (tlsPtr_.opeer != nullptr) {
2678 ScopedObjectAccess soa(self);
2679 // We may need to call user-supplied managed code, do this before final clean-up.
2680 HandleUncaughtExceptions();
2681 RemoveFromThreadGroup();
2682 Runtime* runtime = Runtime::Current();
2683 if (runtime != nullptr && should_run_callbacks) {
2684 runtime->GetRuntimeCallbacks()->ThreadDeath(self);
2685 }
2686
2687 // this.nativePeer = 0;
2688 SetNativePeer</*kSupportTransaction=*/ true>(tlsPtr_.opeer, nullptr);
2689
2690 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
2691 // who is waiting.
2692 ObjPtr<mirror::Object> lock =
2693 WellKnownClasses::java_lang_Thread_lock->GetObject(tlsPtr_.opeer);
2694 // (This conditional is only needed for tests, where Thread.lock won't have been set.)
2695 if (lock != nullptr) {
2696 StackHandleScope<1> hs(self);
2697 Handle<mirror::Object> h_obj(hs.NewHandle(lock));
2698 ObjectLock<mirror::Object> locker(self, h_obj);
2699 locker.NotifyAll();
2700 }
2701
2702 tlsPtr_.opeer = nullptr;
2703 }
2704
2705 {
2706 ScopedObjectAccess soa(self);
2707 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
2708
2709 if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
2710 Trace::FlushThreadBuffer(self);
2711 }
2712 }
2713 // Mark-stack revocation must be performed at the very end. No
2714 // checkpoint/flip-function or read-barrier should be called after this.
2715 if (gUseReadBarrier) {
2716 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
2717 }
2718 }
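// Teardown order (a sketch of the code above): force-exit JNI monitors and drop
// the jpeer / class-loader-override global refs, run uncaught-exception handling,
// leave the thread group, fire the ThreadDeath runtime callback (when requested),
// clear Thread.nativePeer and notify Thread.join() waiters, revoke thread-local
// allocation buffers and flush any method trace buffer, and finally (under the CC
// collector) revoke the thread-local mark stack, after which no read barrier may
// run on this thread.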
2719
2720 Thread::~Thread() {
2721 CHECK(tlsPtr_.class_loader_override == nullptr);
2722 CHECK(tlsPtr_.jpeer == nullptr);
2723 CHECK(tlsPtr_.opeer == nullptr);
2724 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
2725 if (initialized) {
2726 delete tlsPtr_.jni_env;
2727 tlsPtr_.jni_env = nullptr;
2728 }
2729 CHECK_NE(GetState(), ThreadState::kRunnable);
2730 CHECK(!ReadFlag(ThreadFlag::kCheckpointRequest, std::memory_order_relaxed));
2731 CHECK(!ReadFlag(ThreadFlag::kEmptyCheckpointRequest, std::memory_order_relaxed));
2732 CHECK(!ReadFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed));
2733 CHECK(tlsPtr_.checkpoint_function == nullptr);
2734 CHECK_EQ(checkpoint_overflow_.size(), 0u);
2735 // A pending flip function request is OK. FlipThreadRoots will have been notified that we
2736 // exited, and nobody will attempt to process the request.
2737
2738 // Make sure we processed all deoptimization requests.
2739 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
2740 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
2741 "Not all deoptimized frames have been consumed by the debugger.";
2742
2743 // We may be deleting a stillborn thread.
2744 SetStateUnsafe(ThreadState::kTerminated);
2745
2746 delete wait_cond_;
2747 delete wait_mutex_;
2748
2749 if (initialized) {
2750 CleanupCpu();
2751 }
2752
2753 SetCachedThreadName(nullptr); // Deallocate name.
2754 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
2755
2756 CHECK_EQ(tlsPtr_.method_trace_buffer, nullptr);
2757
2758 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
2759
2760 TearDownAlternateSignalStack();
2761 }
2762
2763 void Thread::HandleUncaughtExceptions() {
2764 Thread* self = this;
2765 DCHECK_EQ(self, Thread::Current());
2766 if (!self->IsExceptionPending()) {
2767 return;
2768 }
2769
2770 // Get and clear the exception.
2771 ObjPtr<mirror::Object> exception = self->GetException();
2772 self->ClearException();
2773
2774 // Call the Thread instance's dispatchUncaughtException(Throwable)
2775 WellKnownClasses::java_lang_Thread_dispatchUncaughtException->InvokeFinal<'V', 'L'>(
2776 self, tlsPtr_.opeer, exception);
2777
2778 // If the dispatchUncaughtException threw, clear that exception too.
2779 self->ClearException();
2780 }
2781
2782 void Thread::RemoveFromThreadGroup() {
2783 Thread* self = this;
2784 DCHECK_EQ(self, Thread::Current());
2785 // this.group.threadTerminated(this);
2786 // group can be null if we're in the compiler or a test.
2787 ObjPtr<mirror::Object> group =
2788 WellKnownClasses::java_lang_Thread_group->GetObject(tlsPtr_.opeer);
2789 if (group != nullptr) {
2790 WellKnownClasses::java_lang_ThreadGroup_threadTerminated->InvokeVirtual<'V', 'L'>(
2791 self, group, tlsPtr_.opeer);
2792 }
2793 }
2794
2795 template <bool kPointsToStack>
2796 class JniTransitionReferenceVisitor : public StackVisitor {
2797 public:
2798 JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
2799 : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2800 obj_(obj),
2801 found_(false) {}
2802
2803 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2804 ArtMethod* m = GetMethod();
2805 if (!m->IsNative() || m->IsCriticalNative()) {
2806 return true;
2807 }
2808 if (kPointsToStack) {
2809 uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
2810 size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
2811 uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
2812 if (!m->IsStatic()) {
2813 if (current_vreg == obj_) {
2814 found_ = true;
2815 return false;
2816 }
2817 current_vreg += 1u;
2818 }
2819 uint32_t shorty_length;
2820 const char* shorty = m->GetShorty(&shorty_length);
2821 for (size_t i = 1; i != shorty_length; ++i) {
2822 switch (shorty[i]) {
2823 case 'D':
2824 case 'J':
2825 current_vreg += 2u;
2826 break;
2827 case 'L':
2828 if (current_vreg == obj_) {
2829 found_ = true;
2830 return false;
2831 }
2832 FALLTHROUGH_INTENDED;
2833 default:
2834 current_vreg += 1u;
2835 break;
2836 }
2837 }
2838 // Continue only if the object is somewhere higher on the stack.
2839 return obj_ >= current_vreg;
2840 } else { // if (kPointsToStack)
2841 if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
2842 found_ = true;
2843 return false;
2844 }
2845 return true;
2846 }
2847 }
2848
2849 bool Found() const {
2850 return found_;
2851 }
2852
2853 private:
2854 void* obj_;
2855 bool found_;
2856 };
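// Sketch of the scan performed by the visitor above (derived from the code here,
// not an authoritative frame-layout description): for a non-critical native
// method, spilled reference arguments are searched starting at
//   sp + frame_size + sizeof(ArtMethod*)
// with the implicit `this` slot first for instance methods, then the arguments in
// shorty order, where 'J' and 'D' occupy two slots and each 'L' slot is a
// candidate match for `obj`. A reference not found on the stack may instead point
// at a static method's declaring-class slot (the kPointsToStack == false case).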
2857
2858 bool Thread::IsRawObjOnQuickStack(uint8_t* raw_obj) const {
2859 return (static_cast<size_t>(raw_obj - GetStackBegin<kQuickStackType>()) <
2860 GetStackSize<kQuickStackType>());
2861 }
2862
2863 bool Thread::IsJniTransitionReference(jobject obj) const {
2864 DCHECK(obj != nullptr);
2865 // We need a non-const pointer for stack walk even if we're not modifying the thread state.
2866 Thread* thread = const_cast<Thread*>(this);
2867 uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
2868 if (IsRawObjOnQuickStack(raw_obj)) {
2869 JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
2870 visitor.WalkStack();
2871 return visitor.Found();
2872 } else {
2873 JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
2874 visitor.WalkStack();
2875 return visitor.Found();
2876 }
2877 }
2878
2879 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
2880 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
2881 visitor, RootInfo(kRootNativeStack, thread_id));
2882 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
2883 cur->VisitRoots(buffered_visitor);
2884 }
2885 }
2886
2887 ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
2888 DCHECK(obj != nullptr);
2889 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2890 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2891 DCHECK_NE(kind, kJniTransition);
2892 DCHECK_NE(kind, kLocal);
2893 ObjPtr<mirror::Object> result;
2894 bool expect_null = false;
2895 if (kind == kGlobal) {
2896 result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
2897 } else {
2898 DCHECK_EQ(kind, kWeakGlobal);
2899 result = tlsPtr_.jni_env->vm_->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
2900 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
2901 // This is a special case where it's okay to return null.
2902 expect_null = true;
2903 result = nullptr;
2904 }
2905 }
2906
2907 DCHECK(expect_null || result != nullptr)
2908 << "use of deleted " << ToStr<IndirectRefKind>(kind).c_str()
2909 << " " << static_cast<const void*>(obj);
2910 return result;
2911 }
2912
2913 bool Thread::IsJWeakCleared(jweak obj) const {
2914 CHECK(obj != nullptr);
2915 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
2916 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
2917 CHECK_EQ(kind, kWeakGlobal);
2918 return tlsPtr_.jni_env->vm_->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
2919 }
2920
2921 // Implements java.lang.Thread.interrupted.
2922 bool Thread::Interrupted() {
2923 DCHECK_EQ(Thread::Current(), this);
2924 // No other thread can concurrently reset the interrupted flag.
2925 bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst);
2926 if (interrupted) {
2927 tls32_.interrupted.store(false, std::memory_order_seq_cst);
2928 }
2929 return interrupted;
2930 }
2931
2932 // Implements java.lang.Thread.isInterrupted.
2933 bool Thread::IsInterrupted() {
2934 return tls32_.interrupted.load(std::memory_order_seq_cst);
2935 }
2936
2937 void Thread::Interrupt(Thread* self) {
2938 {
2939 MutexLock mu(self, *wait_mutex_);
2940 if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
2941 return;
2942 }
2943 tls32_.interrupted.store(true, std::memory_order_seq_cst);
2944 NotifyLocked(self);
2945 }
2946 Unpark();
2947 }
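// Illustrative semantics of the interrupt flag (a sketch; `t` is a hypothetical
// other thread):
//
//   t->Interrupt(self);                                  // set flag, wake wait()/park()
//   t->IsInterrupted();                                  // -> true, flag stays set
//   /* on t itself */ Thread::Current()->Interrupted();  // -> true, flag cleared
//
// matching java.lang.Thread.interrupt(), isInterrupted() and interrupted().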
2948
2949 void Thread::Notify() {
2950 Thread* self = Thread::Current();
2951 MutexLock mu(self, *wait_mutex_);
2952 NotifyLocked(self);
2953 }
2954
2955 void Thread::NotifyLocked(Thread* self) {
2956 if (wait_monitor_ != nullptr) {
2957 wait_cond_->Signal(self);
2958 }
2959 }
2960
2961 void Thread::SetClassLoaderOverride(jobject class_loader_override) {
2962 if (tlsPtr_.class_loader_override != nullptr) {
2963 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
2964 }
2965 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
2966 }
2967
2968 using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;
2969
2970 // Counts the stack trace depth and also fetches the first max_saved_frames frames.
2971 class FetchStackTraceVisitor : public StackVisitor {
2972 public:
2973 explicit FetchStackTraceVisitor(Thread* thread,
2974 ArtMethodDexPcPair* saved_frames = nullptr,
2975 size_t max_saved_frames = 0)
2976 REQUIRES_SHARED(Locks::mutator_lock_)
2977 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2978 saved_frames_(saved_frames),
2979 max_saved_frames_(max_saved_frames) {}
2980
2981 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
2982 // We want to skip frames up to and including the exception's constructor.
2983 // Note we also skip the frame if it doesn't have a method (namely the callee
2984 // save frame).
2985 ArtMethod* m = GetMethod();
2986 if (skipping_ && !m->IsRuntimeMethod() &&
2987 !GetClassRoot<mirror::Throwable>()->IsAssignableFrom(m->GetDeclaringClass())) {
2988 skipping_ = false;
2989 }
2990 if (!skipping_) {
2991 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save).
2992 if (depth_ < max_saved_frames_) {
2993 saved_frames_[depth_].first = m;
2994 saved_frames_[depth_].second = m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc();
2995 }
2996 ++depth_;
2997 }
2998 } else {
2999 ++skip_depth_;
3000 }
3001 return true;
3002 }
3003
3004 uint32_t GetDepth() const {
3005 return depth_;
3006 }
3007
3008 uint32_t GetSkipDepth() const {
3009 return skip_depth_;
3010 }
3011
3012 private:
3013 uint32_t depth_ = 0;
3014 uint32_t skip_depth_ = 0;
3015 bool skipping_ = true;
3016 ArtMethodDexPcPair* saved_frames_;
3017 const size_t max_saved_frames_;
3018
3019 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
3020 };
3021
3022 class BuildInternalStackTraceVisitor : public StackVisitor {
3023 public:
3024 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, uint32_t skip_depth)
3025 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3026 self_(self),
3027 skip_depth_(skip_depth),
3028 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
3029
3030 bool Init(uint32_t depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
3031 // Allocate method trace as an object array where the first element is a pointer array that
3032 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
3033 // class of the ArtMethod pointers.
3034 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3035 StackHandleScope<1> hs(self_);
3036 ObjPtr<mirror::Class> array_class =
3037 GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker);
3038 // The first element is the methods and dex pc array, the other elements are declaring classes
3039 // for the methods to ensure classes in the stack trace don't get unloaded.
3040 Handle<mirror::ObjectArray<mirror::Object>> trace(
3041 hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
3042 hs.Self(), array_class, static_cast<int32_t>(depth) + 1)));
3043 if (trace == nullptr) {
3044 // Acquire uninterruptible_ in all paths.
3045 self_->StartAssertNoThreadSuspension("Building internal stack trace");
3046 self_->AssertPendingOOMException();
3047 return false;
3048 }
3049 ObjPtr<mirror::PointerArray> methods_and_pcs =
3050 class_linker->AllocPointerArray(self_, depth * 2);
3051 const char* last_no_suspend_cause =
3052 self_->StartAssertNoThreadSuspension("Building internal stack trace");
3053 if (methods_and_pcs == nullptr) {
3054 self_->AssertPendingOOMException();
3055 return false;
3056 }
3057 trace->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(0, methods_and_pcs);
3058 trace_ = trace.Get();
3059 // If we are called from native, use non-transactional mode.
3060 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
3061 return true;
3062 }
3063
3064 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
3065 self_->EndAssertNoThreadSuspension(nullptr);
3066 }
3067
3068 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
3069 if (trace_ == nullptr) {
3070 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
3071 }
3072 if (skip_depth_ > 0) {
3073 skip_depth_--;
3074 return true;
3075 }
3076 ArtMethod* m = GetMethod();
3077 if (m->IsRuntimeMethod()) {
3078 return true; // Ignore runtime frames (in particular callee save).
3079 }
3080 AddFrame(m, m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc());
3081 return true;
3082 }
3083
3084 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3085 ObjPtr<mirror::PointerArray> methods_and_pcs = GetTraceMethodsAndPCs();
3086 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3087 count_, method, pointer_size_);
3088 methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3089 static_cast<uint32_t>(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_);
3090 // Save the declaring class of the method to ensure that the declaring classes of the methods
3091 // do not get unloaded while the stack trace is live. However, this does not work for copied
3092 // methods because the declaring class of a copied method points to an interface class which
3093 // may be in a different class loader. Instead, retrieve the class loader associated with the
3094 // allocator that holds the copied method. This is much cheaper than finding the actual class.
3095 ObjPtr<mirror::Object> keep_alive;
3096 if (UNLIKELY(method->IsCopied())) {
3097 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3098 keep_alive = class_linker->GetHoldingClassLoaderOfCopiedMethod(self_, method);
3099 } else {
3100 keep_alive = method->GetDeclaringClass();
3101 }
3102 trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
3103 static_cast<int32_t>(count_) + 1, keep_alive);
3104 ++count_;
3105 }
3106
3107 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
3108 return ObjPtr<mirror::PointerArray>::DownCast(trace_->Get(0));
3109 }
3110
3111 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
3112 return trace_;
3113 }
3114
3115 private:
3116 Thread* const self_;
3117 // How many more frames to skip.
3118 uint32_t skip_depth_;
3119 // Current position down stack trace.
3120 uint32_t count_ = 0;
3121 // An object array where the first element is a pointer array that contains the `ArtMethod`
3122 // pointers on the stack and dex PCs. The rest of the elements are referencing objects
3123 // that shall keep the methods alive, namely the declaring class of the `ArtMethod` for
3124 // declared methods and the class loader for copied methods (because it's faster to find
3125 // the class loader than the actual class that holds the copied method). The `trace_[i+1]`
3126 // contains the declaring class or class loader of the `ArtMethod` of the i'th frame.
3127 // We're initializing a newly allocated trace, so we do not need to record that under
3128 // a transaction. If the transaction is aborted, the whole trace shall be unreachable.
3129 mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
3130 // For cross compilation.
3131 const PointerSize pointer_size_;
3132
3133 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
3134 };
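// Layout of the internal trace built above (sketch derived from the code):
//
//   trace[0]      : mirror::PointerArray of length 2 * depth
//                     [0, depth)         ArtMethod* of frame i
//                     [depth, 2 * depth) dex pc of frame i
//   trace[1 + i]  : declaring class of frame i's method, or the holding class
//                   loader for copied methods, kept reachable so the methods
//                   cannot be unloaded while the trace object is live.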
3135
3136 ObjPtr<mirror::ObjectArray<mirror::Object>> Thread::CreateInternalStackTrace(
3137 const ScopedObjectAccessAlreadyRunnable& soa) const {
3138 // Compute depth of stack, save frames if possible to avoid needing to recompute many.
3139 constexpr size_t kMaxSavedFrames = 256;
3140 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
3141 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
3142 &saved_frames[0],
3143 kMaxSavedFrames);
3144 count_visitor.WalkStack();
3145 const uint32_t depth = count_visitor.GetDepth();
3146 const uint32_t skip_depth = count_visitor.GetSkipDepth();
3147
3148 // Build internal stack trace.
3149 BuildInternalStackTraceVisitor build_trace_visitor(
3150 soa.Self(), const_cast<Thread*>(this), skip_depth);
3151 if (!build_trace_visitor.Init(depth)) {
3152 return nullptr; // Allocation failed.
3153 }
3154 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
3155 // than doing the stack walk twice.
3156 if (depth < kMaxSavedFrames) {
3157 for (size_t i = 0; i < depth; ++i) {
3158 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
3159 }
3160 } else {
3161 build_trace_visitor.WalkStack();
3162 }
3163
3164 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
3165 if (kIsDebugBuild) {
3166 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
3167 // Second half of trace_methods is dex PCs.
3168 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
3169 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
3170 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
3171 CHECK(method != nullptr);
3172 }
3173 }
3174 return trace;
3175 }
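// Typical consumers of the internal trace (a sketch, not an exhaustive list):
// Throwable's fillInStackTrace stores it as the backtrace, and it is later
// expanded either into java.lang.StackTraceElement objects by
// InternalStackTraceToStackTraceElementArray() or into java.lang.StackFrameInfo
// objects by InternalStackTraceToStackFrameInfoArray() for the StackWalker API.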
3176
3177 bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
3178 // Only count the depth since we do not pass a stack frame array as an argument.
3179 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this));
3180 count_visitor.WalkStack();
3181 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth());
3182 }
3183
3184 static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement(
3185 const ScopedObjectAccessAlreadyRunnable& soa,
3186 ArtMethod* method,
3187 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3188 int32_t line_number;
3189 StackHandleScope<3> hs(soa.Self());
3190 auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
3191 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3192 if (method->IsProxyMethod()) {
3193 line_number = -1;
3194 class_name_object.Assign(method->GetDeclaringClass()->GetName());
3195 // source_name_object intentionally left null for proxy methods
3196 } else {
3197 line_number = method->GetLineNumFromDexPC(dex_pc);
3198 // Allocate element, potentially triggering GC
3199 // TODO: reuse class_name_object via Class::name_?
3200 const char* descriptor = method->GetDeclaringClassDescriptor();
3201 CHECK(descriptor != nullptr);
3202 std::string class_name(PrettyDescriptor(descriptor));
3203 class_name_object.Assign(
3204 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
3205 if (class_name_object == nullptr) {
3206 soa.Self()->AssertPendingOOMException();
3207 return nullptr;
3208 }
3209 const char* source_file = method->GetDeclaringClassSourceFile();
3210 if (line_number == -1) {
3211 // Make the line_number field of StackTraceElement hold the dex pc.
3212 // source_name_object is intentionally left null if we failed to map the dex pc to
3213 // a line number (most probably because there is no debug info). See b/30183883.
3214 line_number = static_cast<int32_t>(dex_pc);
3215 } else {
3216 if (source_file != nullptr) {
3217 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3218 if (source_name_object == nullptr) {
3219 soa.Self()->AssertPendingOOMException();
3220 return nullptr;
3221 }
3222 }
3223 }
3224 }
3225 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
3226 CHECK(method_name != nullptr);
3227 Handle<mirror::String> method_name_object(
3228 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3229 if (method_name_object == nullptr) {
3230 return nullptr;
3231 }
3232 return mirror::StackTraceElement::Alloc(soa.Self(),
3233 class_name_object,
3234 method_name_object,
3235 source_name_object,
3236 line_number);
3237 }
3238
3239 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
3240 const ScopedObjectAccessAlreadyRunnable& soa,
3241 jobject internal,
3242 jobjectArray output_array,
3243 int* stack_depth) {
3244 // Decode the internal stack trace into the depth, method trace and PC trace.
3245 // Subtract one for the methods and PC trace.
3246 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3247 DCHECK_GE(depth, 0);
3248
3249 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3250
3251 jobjectArray result;
3252
3253 if (output_array != nullptr) {
3254 // Reuse the array we were given.
3255 result = output_array;
3256 // ...adjusting the number of frames we'll write to not exceed the array length.
3257 const int32_t traces_length =
3258 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength();
3259 depth = std::min(depth, traces_length);
3260 } else {
3261 // Create java_trace array and place in local reference table
3262 ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> java_traces =
3263 class_linker->AllocStackTraceElementArray(soa.Self(), static_cast<size_t>(depth));
3264 if (java_traces == nullptr) {
3265 return nullptr;
3266 }
3267 result = soa.AddLocalReference<jobjectArray>(java_traces);
3268 }
3269
3270 if (stack_depth != nullptr) {
3271 *stack_depth = depth;
3272 }
3273
3274 for (uint32_t i = 0; i < static_cast<uint32_t>(depth); ++i) {
3275 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
3276 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
3277 // Methods and dex PC trace is element 0.
3278 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3279 const ObjPtr<mirror::PointerArray> method_trace =
3280 ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0));
3281 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
3282 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
3283 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3284 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3285 const ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc);
3286 if (obj == nullptr) {
3287 return nullptr;
3288 }
3289 // We are called from native: use non-transactional mode.
3290 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(
3291 static_cast<int32_t>(i), obj);
3292 }
3293 return result;
3294 }
3295
3296 [[nodiscard]] static ObjPtr<mirror::StackFrameInfo> InitStackFrameInfo(
3297 const ScopedObjectAccessAlreadyRunnable& soa,
3298 ClassLinker* class_linker,
3299 Handle<mirror::StackFrameInfo> stackFrameInfo,
3300 ArtMethod* method,
3301 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
3302 StackHandleScope<4> hs(soa.Self());
3303 int32_t line_number;
3304 auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
3305 if (method->IsProxyMethod()) {
3306 line_number = -1;
3307 // source_name_object intentionally left null for proxy methods
3308 } else {
3309 line_number = method->GetLineNumFromDexPC(dex_pc);
3310 if (line_number == -1) {
3311 // Make the line_number field of StackFrameInfo hold the dex pc.
3312 // source_name_object is intentionally left null if we failed to map the dex pc to
3313 // a line number (most probably because there is no debug info). See b/30183883.
3314 line_number = static_cast<int32_t>(dex_pc);
3315 } else {
3316 const char* source_file = method->GetDeclaringClassSourceFile();
3317 if (source_file != nullptr) {
3318 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
3319 if (source_name_object == nullptr) {
3320 soa.Self()->AssertPendingOOMException();
3321 return nullptr;
3322 }
3323 }
3324 }
3325 }
3326
3327 Handle<mirror::Class> declaring_class_object(
3328 hs.NewHandle<mirror::Class>(method->GetDeclaringClass()));
3329
3330 ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
3331 const char* method_name = interface_method->GetName();
3332 CHECK(method_name != nullptr);
3333 Handle<mirror::String> method_name_object(
3334 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
3335 if (method_name_object == nullptr) {
3336 soa.Self()->AssertPendingOOMException();
3337 return nullptr;
3338 }
3339
3340 dex::ProtoIndex proto_idx = interface_method->GetProtoIndex();
3341 Handle<mirror::MethodType> method_type_object(hs.NewHandle<mirror::MethodType>(
3342 class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method)));
3343 if (method_type_object == nullptr) {
3344 soa.Self()->AssertPendingOOMException();
3345 return nullptr;
3346 }
3347
3348 stackFrameInfo->AssignFields(declaring_class_object,
3349 method_type_object,
3350 method_name_object,
3351 source_name_object,
3352 line_number,
3353 static_cast<int32_t>(dex_pc));
3354 return stackFrameInfo.Get();
3355 }
3356
3357 constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY
3358
3359 jint Thread::InternalStackTraceToStackFrameInfoArray(
3360 const ScopedObjectAccessAlreadyRunnable& soa,
3361 jlong mode, // See java.lang.StackStreamFactory for the mode flags
3362 jobject internal,
3363 jint startLevel,
3364 jint batchSize,
3365 jint startBufferIndex,
3366 jobjectArray output_array) {
3367 // Decode the internal stack trace into the depth, method trace and PC trace.
3368 // Subtract one for the methods and PC trace.
3369 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
3370 DCHECK_GE(depth, 0);
3371
3372 StackHandleScope<6> hs(soa.Self());
3373 Handle<mirror::ObjectArray<mirror::Object>> framesOrClasses =
3374 hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(output_array));
3375
3376 jint endBufferIndex = startBufferIndex;
3377
3378 if (startLevel < 0 || startLevel >= depth) {
3379 return endBufferIndex;
3380 }
3381
3382 int32_t bufferSize = framesOrClasses->GetLength();
3383 if (startBufferIndex < 0 || startBufferIndex >= bufferSize) {
3384 return endBufferIndex;
3385 }
3386
3387 // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc.
3388 bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0;
3389
3390 Handle<mirror::ObjectArray<mirror::Object>> decoded_traces =
3391 hs.NewHandle(soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>());
3392 // Methods and dex PC trace is element 0.
3393 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
3394 Handle<mirror::PointerArray> method_trace =
3395 hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)));
3396
3397 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3398 Handle<mirror::Class> sfi_class =
3399 hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;"));
3400 DCHECK(sfi_class != nullptr);
3401
3402 MutableHandle<mirror::StackFrameInfo> frame = hs.NewHandle<mirror::StackFrameInfo>(nullptr);
3403 MutableHandle<mirror::Class> clazz = hs.NewHandle<mirror::Class>(nullptr);
3404 for (uint32_t i = static_cast<uint32_t>(startLevel); i < static_cast<uint32_t>(depth); ++i) {
3405 if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) {
3406 break;
3407 }
3408
3409 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
3410 if (isClassArray) {
3411 clazz.Assign(method->GetDeclaringClass());
3412 framesOrClasses->Set(endBufferIndex, clazz.Get());
3413 } else {
3414 // Prepare parameters for fields in StackFrameInfo
3415 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
3416 i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
3417
3418 ObjPtr<mirror::Object> frameObject = framesOrClasses->Get(endBufferIndex);
3419 // If libcore didn't allocate the object, we just stop here, but it's unlikely.
3420 if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) {
3421 break;
3422 }
3423 frame.Assign(ObjPtr<mirror::StackFrameInfo>::DownCast(frameObject));
3424 frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc));
3425 // Break if InitStackFrameInfo fails to allocate objects or assign the fields.
3426 if (frame == nullptr) {
3427 break;
3428 }
3429 }
3430
3431 ++endBufferIndex;
3432 }
3433
3434 return endBufferIndex;
3435 }
3436
3437 jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
3438 // This code allocates. Do not allow it to operate with a pending exception.
3439 if (IsExceptionPending()) {
3440 return nullptr;
3441 }
3442
3443 class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor {
3444 public:
3445 CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in,
3446 Thread* self,
3447 Context* context)
3448 : MonitorObjectsStackVisitor(self, context),
3449 wait_jobject_(soaa_in.Env(), nullptr),
3450 block_jobject_(soaa_in.Env(), nullptr),
3451 soaa_(soaa_in) {}
3452
3453 protected:
3454 VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
3455 REQUIRES_SHARED(Locks::mutator_lock_) {
3456 ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
3457 soaa_, m, GetDexPc(/* abort on error */ false));
3458 if (obj == nullptr) {
3459 return VisitMethodResult::kEndStackWalk;
3460 }
3461 stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr()));
3462 return VisitMethodResult::kContinueMethod;
3463 }
3464
3465 VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
3466 lock_objects_.push_back({});
3467 lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
3468
3469 DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size());
3470
3471 return VisitMethodResult::kContinueMethod;
3472 }
3473
3474 void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
3475 REQUIRES_SHARED(Locks::mutator_lock_) {
3476 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3477 }
3478 void VisitSleepingObject(ObjPtr<mirror::Object> obj)
3479 override
3480 REQUIRES_SHARED(Locks::mutator_lock_) {
3481 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3482 }
3483 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
3484 [[maybe_unused]] ThreadState state,
3485 [[maybe_unused]] uint32_t owner_tid) override
3486 REQUIRES_SHARED(Locks::mutator_lock_) {
3487 block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
3488 }
3489 void VisitLockedObject(ObjPtr<mirror::Object> obj)
3490 override
3491 REQUIRES_SHARED(Locks::mutator_lock_) {
3492 frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
3493 }
3494
3495 public:
3496 std::vector<ScopedLocalRef<jobject>> stack_trace_elements_;
3497 ScopedLocalRef<jobject> wait_jobject_;
3498 ScopedLocalRef<jobject> block_jobject_;
3499 std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_;
3500
3501 private:
3502 const ScopedObjectAccessAlreadyRunnable& soaa_;
3503
3504 std::vector<ScopedLocalRef<jobject>> frame_lock_objects_;
3505 };
3506
3507 std::unique_ptr<Context> context(Context::Create());
3508 CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get());
3509 dumper.WalkStack();
3510
3511 // There should not be a pending exception. Otherwise, return with it pending.
3512 if (IsExceptionPending()) {
3513 return nullptr;
3514 }
3515
3516 // Now go and create Java arrays.
3517
3518 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
3519
3520 StackHandleScope<6> hs(soa.Self());
3521 Handle<mirror::Class> h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass(
3522 soa.Self(),
3523 "[Ldalvik/system/AnnotatedStackTraceElement;"));
3524 if (h_aste_array_class == nullptr) {
3525 return nullptr;
3526 }
3527 Handle<mirror::Class> h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType());
3528
3529 Handle<mirror::Class> h_o_array_class =
3530 hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker));
3531 DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized.
3532
3533
3534 // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924.
3535 class_linker->EnsureInitialized(soa.Self(),
3536 h_aste_class,
3537 /* can_init_fields= */ true,
3538 /* can_init_parents= */ true);
3539 if (soa.Self()->IsExceptionPending()) {
3540 // This should not fail in a healthy runtime.
3541 return nullptr;
3542 }
3543
3544 ArtField* stack_trace_element_field =
3545 h_aste_class->FindDeclaredInstanceField("stackTraceElement", "Ljava/lang/StackTraceElement;");
3546 DCHECK(stack_trace_element_field != nullptr);
3547 ArtField* held_locks_field =
3548 h_aste_class->FindDeclaredInstanceField("heldLocks", "[Ljava/lang/Object;");
3549 DCHECK(held_locks_field != nullptr);
3550 ArtField* blocked_on_field =
3551 h_aste_class->FindDeclaredInstanceField("blockedOn", "Ljava/lang/Object;");
3552 DCHECK(blocked_on_field != nullptr);
3553
3554 int32_t length = static_cast<int32_t>(dumper.stack_trace_elements_.size());
3555 ObjPtr<mirror::ObjectArray<mirror::Object>> array =
3556 mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length);
3557 if (array == nullptr) {
3558 soa.Self()->AssertPendingOOMException();
3559 return nullptr;
3560 }
3561
3562 ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array));
3563
3564 MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
3565 MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
3566 hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
3567 for (size_t i = 0; i != static_cast<size_t>(length); ++i) {
3568 handle.Assign(h_aste_class->AllocObject(soa.Self()));
3569 if (handle == nullptr) {
3570 soa.Self()->AssertPendingOOMException();
3571 return nullptr;
3572 }
3573
3574 // Set stack trace element.
3575 stack_trace_element_field->SetObject<false>(
3576 handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get()));
3577
3578 // Create locked-on array.
3579 if (!dumper.lock_objects_[i].empty()) {
3580 handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(
3581 soa.Self(), h_o_array_class.Get(), static_cast<int32_t>(dumper.lock_objects_[i].size())));
3582 if (handle2 == nullptr) {
3583 soa.Self()->AssertPendingOOMException();
3584 return nullptr;
3585 }
3586 int32_t j = 0;
3587 for (auto& scoped_local : dumper.lock_objects_[i]) {
3588 if (scoped_local == nullptr) {
3589 continue;
3590 }
3591 handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get()));
3592 DCHECK(!soa.Self()->IsExceptionPending());
3593 j++;
3594 }
3595 held_locks_field->SetObject<false>(handle.Get(), handle2.Get());
3596 }
3597
3598 // Set blocked-on object.
3599 if (i == 0) {
3600 if (dumper.block_jobject_ != nullptr) {
3601 blocked_on_field->SetObject<false>(
3602 handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get()));
3603 }
3604 }
3605
3606 ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
3607 soa.Env()->SetObjectArrayElement(result.get(), static_cast<jsize>(i), elem.get());
3608 DCHECK(!soa.Self()->IsExceptionPending());
3609 }
3610
3611 return result.release();
3612 }
3613
3614 void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
3615 va_list args;
3616 va_start(args, fmt);
3617 ThrowNewExceptionV(exception_class_descriptor, fmt, args);
3618 va_end(args);
3619 }
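// Minimal usage sketch (hypothetical call site; `length` and `index` are made-up
// locals): runtime code typically throws through the formatted entry point, e.g.
//
//   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
//                            "length=%d; index=%d", length, index);
//
// which expands the format via ThrowNewExceptionV() and then delegates to
// ThrowNewException() / ThrowNewWrappedException() below.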
3620
3621 void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
3622 const char* fmt, va_list ap) {
3623 std::string msg;
3624 StringAppendV(&msg, fmt, ap);
3625 ThrowNewException(exception_class_descriptor, msg.c_str());
3626 }
3627
3628 void Thread::ThrowNewException(const char* exception_class_descriptor,
3629 const char* msg) {
3630 // Callers should either clear or call ThrowNewWrappedException.
3631 AssertNoPendingExceptionForNewException(msg);
3632 ThrowNewWrappedException(exception_class_descriptor, msg);
3633 }
3634
3635 static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self)
3636 REQUIRES_SHARED(Locks::mutator_lock_) {
3637 ArtMethod* method = self->GetCurrentMethod(nullptr);
3638 return method != nullptr
3639 ? method->GetDeclaringClass()->GetClassLoader()
3640 : nullptr;
3641 }
3642
3643 void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
3644 const char* msg) {
3645 DCHECK_EQ(this, Thread::Current());
3646 ScopedObjectAccessUnchecked soa(this);
3647 StackHandleScope<3> hs(soa.Self());
3648
3649 // Disable public sdk checks if we need to throw exceptions.
3650 // The checks are only used in AOT compilation and may block (exception) class
3651 // initialization if it needs access to private fields (e.g. serialVersionUID).
3652 //
3653 // Since throwing an exception will EnsureInitialization and the public sdk may
3654 // block that, disable the checks. It's ok to do so, because the thrown exceptions
3655 // are not part of the application code that needs to be verified.
3656 ScopedDisablePublicSdkChecker sdpsc;
3657
3658 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
3659 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
3660 ClearException();
3661 Runtime* runtime = Runtime::Current();
3662 auto* cl = runtime->GetClassLinker();
3663 Handle<mirror::Class> exception_class(
3664 hs.NewHandle(cl->FindClass(
3665 this, exception_class_descriptor, strlen(exception_class_descriptor), class_loader)));
3666 if (UNLIKELY(exception_class == nullptr)) {
3667 CHECK(IsExceptionPending());
3668 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
3669 return;
3670 }
3671
3672 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
3673 true))) {
3674 DCHECK(IsExceptionPending());
3675 return;
3676 }
3677 DCHECK_IMPLIES(runtime->IsStarted(), exception_class->IsThrowableClass());
3678 Handle<mirror::Throwable> exception(
3679 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));
3680
3681 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
3682 if (exception == nullptr) {
3683 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3684 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingException());
3685 return;
3686 }
3687
3688 // Choose an appropriate constructor and set up the arguments.
3689 const char* signature;
3690 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
3691 if (msg != nullptr) {
3692 // Ensure we remember this and the method over the String allocation.
3693 msg_string.reset(
3694 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
3695 if (UNLIKELY(msg_string.get() == nullptr)) {
3696 CHECK(IsExceptionPending()); // OOME.
3697 return;
3698 }
3699 if (cause.get() == nullptr) {
3700 signature = "(Ljava/lang/String;)V";
3701 } else {
3702 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
3703 }
3704 } else {
3705 if (cause.get() == nullptr) {
3706 signature = "()V";
3707 } else {
3708 signature = "(Ljava/lang/Throwable;)V";
3709 }
3710 }
3711 ArtMethod* exception_init_method =
3712 exception_class->FindConstructor(signature, cl->GetImagePointerSize());
3713
3714 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
3715 << PrettyDescriptor(exception_class_descriptor);
3716
3717 if (UNLIKELY(!runtime->IsStarted())) {
3718 // Something is trying to throw an exception without a started runtime, which is the common
3719 // case in the compiler. We won't be able to invoke the constructor of the exception, so set
3720 // the exception fields directly.
3721 if (msg != nullptr) {
3722 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString());
3723 }
3724 if (cause.get() != nullptr) {
3725 exception->SetCause(DecodeJObject(cause.get())->AsThrowable());
3726 }
3727 ObjPtr<mirror::ObjectArray<mirror::Object>> trace = CreateInternalStackTrace(soa);
3728 if (trace != nullptr) {
3729 exception->SetStackState(trace.Ptr());
3730 }
3731 SetException(exception.Get());
3732 } else {
3733 jvalue jv_args[2];
3734 size_t i = 0;
3735
3736 if (msg != nullptr) {
3737 jv_args[i].l = msg_string.get();
3738 ++i;
3739 }
3740 if (cause.get() != nullptr) {
3741 jv_args[i].l = cause.get();
3742 ++i;
3743 }
3744 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
3745 InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args);
3746 if (LIKELY(!IsExceptionPending())) {
3747 SetException(exception.Get());
3748 }
3749 }
3750 }
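// Constructor selection in ThrowNewWrappedException() (summary of the code above):
//
//   msg != null, cause == null  ->  <init>(Ljava/lang/String;)V
//   msg != null, cause != null  ->  <init>(Ljava/lang/String;Ljava/lang/Throwable;)V
//   msg == null, cause == null  ->  <init>()V
//   msg == null, cause != null  ->  <init>(Ljava/lang/Throwable;)V
//
// With an unstarted runtime the constructor is not invoked; the detail message,
// cause and stack state are written directly into the Throwable instead.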
3751
3752 void Thread::ThrowOutOfMemoryError(const char* msg) {
3753 LOG(WARNING) << "Throwing OutOfMemoryError "
3754 << '"' << msg << '"'
3755 << " (VmSize " << GetProcessStatus("VmSize")
3756 << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")");
3757 ScopedTrace trace("OutOfMemoryError");
3758 if (!tls32_.throwing_OutOfMemoryError) {
3759 tls32_.throwing_OutOfMemoryError = true;
3760 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
3761 tls32_.throwing_OutOfMemoryError = false;
3762 } else {
3763 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
3764 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME());
3765 }
3766 }
3767
3768 Thread* Thread::CurrentFromGdb() {
3769 return Thread::Current();
3770 }
3771
3772 void Thread::DumpFromGdb() const {
3773 std::ostringstream ss;
3774 Dump(ss);
3775 std::string str(ss.str());
3776 // log to stderr for debugging command line processes
3777 std::cerr << str;
3778 #ifdef ART_TARGET_ANDROID
3779 // log to logcat for debugging frameworks processes
3780 LOG(INFO) << str;
3781 #endif
3782 }
3783
3784 // Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
3785 template
3786 void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
3787 template
3788 void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);
3789
3790 template<PointerSize ptr_size>
3791 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
3792 #define DO_THREAD_OFFSET(x, y) \
3793 if (offset == (x).Uint32Value()) { \
3794 os << (y); \
3795 return; \
3796 }
3797 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
3798 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
3799 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
3800 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer");
3801 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
3802 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
3803 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
3804 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
3805 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking")
3806 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
3807 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
3808 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
3809 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
3810 #undef DO_THREAD_OFFSET
3811
3812 #define JNI_ENTRY_POINT_INFO(x) \
3813 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3814 os << #x; \
3815 return; \
3816 }
3817 JNI_ENTRY_POINT_INFO(pDlsymLookup)
3818 JNI_ENTRY_POINT_INFO(pDlsymLookupCritical)
3819 #undef JNI_ENTRY_POINT_INFO
3820
3821 #define QUICK_ENTRY_POINT_INFO(x) \
3822 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
3823 os << #x; \
3824 return; \
3825 }
3826 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
3827 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8)
3828 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16)
3829 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32)
3830 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64)
3831 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
3832 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
3833 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
3834 QUICK_ENTRY_POINT_INFO(pAllocStringObject)
3835 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
3836 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
3837 QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
3838 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
3839 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
3840 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
3841 QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess)
3842 QUICK_ENTRY_POINT_INFO(pResolveType)
3843 QUICK_ENTRY_POINT_INFO(pResolveString)
3844 QUICK_ENTRY_POINT_INFO(pSet8Instance)
3845 QUICK_ENTRY_POINT_INFO(pSet8Static)
3846 QUICK_ENTRY_POINT_INFO(pSet16Instance)
3847 QUICK_ENTRY_POINT_INFO(pSet16Static)
3848 QUICK_ENTRY_POINT_INFO(pSet32Instance)
3849 QUICK_ENTRY_POINT_INFO(pSet32Static)
3850 QUICK_ENTRY_POINT_INFO(pSet64Instance)
3851 QUICK_ENTRY_POINT_INFO(pSet64Static)
3852 QUICK_ENTRY_POINT_INFO(pSetObjInstance)
3853 QUICK_ENTRY_POINT_INFO(pSetObjStatic)
3854 QUICK_ENTRY_POINT_INFO(pGetByteInstance)
3855 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
3856 QUICK_ENTRY_POINT_INFO(pGetByteStatic)
3857 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
3858 QUICK_ENTRY_POINT_INFO(pGetShortInstance)
3859 QUICK_ENTRY_POINT_INFO(pGetCharInstance)
3860 QUICK_ENTRY_POINT_INFO(pGetShortStatic)
3861 QUICK_ENTRY_POINT_INFO(pGetCharStatic)
3862 QUICK_ENTRY_POINT_INFO(pGet32Instance)
3863 QUICK_ENTRY_POINT_INFO(pGet32Static)
3864 QUICK_ENTRY_POINT_INFO(pGet64Instance)
3865 QUICK_ENTRY_POINT_INFO(pGet64Static)
3866 QUICK_ENTRY_POINT_INFO(pGetObjInstance)
3867 QUICK_ENTRY_POINT_INFO(pGetObjStatic)
3868 QUICK_ENTRY_POINT_INFO(pAputObject)
3869 QUICK_ENTRY_POINT_INFO(pJniMethodStart)
3870 QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
3871 QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook)
3872 QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult)
3873 QUICK_ENTRY_POINT_INFO(pJniLockObject)
3874 QUICK_ENTRY_POINT_INFO(pJniUnlockObject)
3875 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
3876 QUICK_ENTRY_POINT_INFO(pLockObject)
3877 QUICK_ENTRY_POINT_INFO(pUnlockObject)
3878 QUICK_ENTRY_POINT_INFO(pCmpgDouble)
3879 QUICK_ENTRY_POINT_INFO(pCmpgFloat)
3880 QUICK_ENTRY_POINT_INFO(pCmplDouble)
3881 QUICK_ENTRY_POINT_INFO(pCmplFloat)
3882 QUICK_ENTRY_POINT_INFO(pCos)
3883 QUICK_ENTRY_POINT_INFO(pSin)
3884 QUICK_ENTRY_POINT_INFO(pAcos)
3885 QUICK_ENTRY_POINT_INFO(pAsin)
3886 QUICK_ENTRY_POINT_INFO(pAtan)
3887 QUICK_ENTRY_POINT_INFO(pAtan2)
3888 QUICK_ENTRY_POINT_INFO(pCbrt)
3889 QUICK_ENTRY_POINT_INFO(pCosh)
3890 QUICK_ENTRY_POINT_INFO(pExp)
3891 QUICK_ENTRY_POINT_INFO(pExpm1)
3892 QUICK_ENTRY_POINT_INFO(pHypot)
3893 QUICK_ENTRY_POINT_INFO(pLog)
3894 QUICK_ENTRY_POINT_INFO(pLog10)
3895 QUICK_ENTRY_POINT_INFO(pNextAfter)
3896 QUICK_ENTRY_POINT_INFO(pSinh)
3897 QUICK_ENTRY_POINT_INFO(pTan)
3898 QUICK_ENTRY_POINT_INFO(pTanh)
3899 QUICK_ENTRY_POINT_INFO(pFmod)
3900 QUICK_ENTRY_POINT_INFO(pL2d)
3901 QUICK_ENTRY_POINT_INFO(pFmodf)
3902 QUICK_ENTRY_POINT_INFO(pL2f)
3903 QUICK_ENTRY_POINT_INFO(pD2iz)
3904 QUICK_ENTRY_POINT_INFO(pF2iz)
3905 QUICK_ENTRY_POINT_INFO(pIdivmod)
3906 QUICK_ENTRY_POINT_INFO(pD2l)
3907 QUICK_ENTRY_POINT_INFO(pF2l)
3908 QUICK_ENTRY_POINT_INFO(pLdiv)
3909 QUICK_ENTRY_POINT_INFO(pLmod)
3910 QUICK_ENTRY_POINT_INFO(pLmul)
3911 QUICK_ENTRY_POINT_INFO(pShlLong)
3912 QUICK_ENTRY_POINT_INFO(pShrLong)
3913 QUICK_ENTRY_POINT_INFO(pUshrLong)
3914 QUICK_ENTRY_POINT_INFO(pIndexOf)
3915 QUICK_ENTRY_POINT_INFO(pStringCompareTo)
3916 QUICK_ENTRY_POINT_INFO(pMemcpy)
3917 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
3918 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
3919 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
3920 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
3921 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
3922 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
3923 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
3924 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
3925 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
3926 QUICK_ENTRY_POINT_INFO(pInvokePolymorphicWithHiddenReceiver)
3927 QUICK_ENTRY_POINT_INFO(pTestSuspend)
3928 QUICK_ENTRY_POINT_INFO(pDeliverException)
3929 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
3930 QUICK_ENTRY_POINT_INFO(pThrowDivZero)
3931 QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
3932 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
3933 QUICK_ENTRY_POINT_INFO(pDeoptimize)
3934 QUICK_ENTRY_POINT_INFO(pA64Load)
3935 QUICK_ENTRY_POINT_INFO(pA64Store)
3936 QUICK_ENTRY_POINT_INFO(pNewEmptyString)
3937 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
3938 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BB)
3939 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
3940 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
3941 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
3942 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
3943 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
3944 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
3945 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
3946 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
3947 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
3948 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
3949 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
3950 QUICK_ENTRY_POINT_INFO(pNewStringFromString)
3951 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
3952 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
3953 QUICK_ENTRY_POINT_INFO(pNewStringFromUtf16Bytes_BII)
3954 QUICK_ENTRY_POINT_INFO(pJniReadBarrier)
3955 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
3956 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
3957 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02)
3958 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03)
3959 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04)
3960 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05)
3961 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06)
3962 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07)
3963 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08)
3964 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09)
3965 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10)
3966 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11)
3967 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12)
3968 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13)
3969 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14)
3970 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15)
3971 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16)
3972 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17)
3973 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18)
3974 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19)
3975 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20)
3976 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21)
3977 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22)
3978 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23)
3979 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24)
3980 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25)
3981 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26)
3982 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
3983 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
3984 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
3985 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
3986 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
3987 #undef QUICK_ENTRY_POINT_INFO
3988
3989 os << offset;
3990 }
3991
3992 std::unique_ptr<Context> Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
3993 // Get exception from thread.
3994 ObjPtr<mirror::Throwable> exception = GetException();
3995 CHECK(exception != nullptr);
3996 if (exception == GetDeoptimizationException()) {
3997 // This wasn't a real exception, so just clear it here. If there was an actual exception it
3998 // will be recorded in the DeoptimizationContext and it will be restored later.
3999 ClearException();
4000 return Deoptimize(DeoptimizationKind::kFullFrame,
4001 /*single_frame=*/ false,
4002 skip_method_exit_callbacks);
4003 }
4004
4005 ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
4006
4007 // This is a real exception: let the instrumentation know about it. Exception throw listener
4008 // could set a breakpoint or install listeners that might require a deoptimization. Hence the
4009 // deoptimization check needs to happen after calling the listener.
4010 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
4011 if (instrumentation->HasExceptionThrownListeners() &&
4012 IsExceptionThrownByCurrentMethod(exception)) {
4013 // Instrumentation may cause GC so keep the exception object safe.
4014 StackHandleScope<1> hs(this);
4015 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
4016 instrumentation->ExceptionThrownEvent(this, exception);
4017 }
4018 // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
4019 // Note: we do this *after* reporting the exception to instrumentation in case it now requires
4020 // deoptimization. It may happen if a debugger is attached and requests new events (single-step,
4021 // breakpoint, ...) when the exception is reported.
4022 // Frame pop can be requested on a method unwind callback which requires a deopt. We could
4023 // potentially check after each unwind callback to see if a frame pop was requested and deopt if
4024 // needed. Since this is a debug only feature and this path is only taken when an exception is
4025 // thrown, it is not performance critical and we keep it simple by just deopting if method exit
4026 // listeners are installed and frame pop feature is supported.
4027 bool needs_deopt =
4028 instrumentation->HasMethodExitListeners() && Runtime::Current()->AreNonStandardExitsEnabled();
4029 if (Dbg::IsForcedInterpreterNeededForException(this) || IsForceInterpreter() || needs_deopt) {
4030 NthCallerVisitor visitor(this, 0, false);
4031 visitor.WalkStack();
4032 if (visitor.GetCurrentQuickFrame() != nullptr) {
4033 if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) {
4034 // method_type shouldn't matter due to exception handling.
4035 const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
4036 // Save the exception into the deoptimization context so it can be restored
4037 // before entering the interpreter.
4038 PushDeoptimizationContext(
4039 JValue(),
4040 /* is_reference= */ false,
4041 exception,
4042 /* from_code= */ false,
4043 method_type);
4044 return Deoptimize(DeoptimizationKind::kFullFrame,
4045 /*single_frame=*/ false,
4046 skip_method_exit_callbacks);
4047 } else {
4048 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
4049 << visitor.caller->PrettyMethod();
4050 }
4051 } else {
4052 // This is either top of call stack, or shadow frame.
4053 DCHECK(visitor.caller == nullptr || visitor.IsShadowFrame());
4054 }
4055 }
4056
4057 // Don't leave exception visible while we try to find the handler, which may cause class
4058 // resolution.
4059 ClearException();
4060 QuickExceptionHandler exception_handler(this, false);
4061 exception_handler.FindCatch(exception, skip_method_exit_callbacks);
4062 if (exception_handler.GetClearException()) {
4063 // Exception was cleared as part of delivery.
4064 DCHECK(!IsExceptionPending());
4065 } else {
4066 // Exception was put back with a throw location.
4067 DCHECK(IsExceptionPending());
4068 // Check the to-space invariant on the re-installed exception (if applicable).
4069 ReadBarrier::MaybeAssertToSpaceInvariant(GetException());
4070 }
4071 return exception_handler.PrepareLongJump();
4072 }
4073
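// Deoptimizes either a single frame or the whole stack fragment and returns the Context used to
// long-jump into the interpreter. A deoptimization context must already have been pushed (see
// AssertHasDeoptimizationContext below).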
std::unique_ptr<Context> Thread::Deoptimize(DeoptimizationKind kind,
                                            bool single_frame,
                                            bool skip_method_exit_callbacks) {
  Runtime::Current()->IncrementDeoptimizationCount(kind);
  if (VLOG_IS_ON(deopt)) {
    if (single_frame) {
      // Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
      // specialized visitor that will show whether a method is Quick or Shadow.
    } else {
      LOG(INFO) << "Deopting:";
      Dump(LOG_STREAM(INFO));
    }
  }

  AssertHasDeoptimizationContext();
  QuickExceptionHandler exception_handler(this, true);
  if (single_frame) {
    exception_handler.DeoptimizeSingleFrame(kind);
  } else {
    exception_handler.DeoptimizeStack(skip_method_exit_callbacks);
  }
  if (exception_handler.IsFullFragmentDone()) {
    return exception_handler.PrepareLongJump(/*smash_caller_saves=*/ true);
  } else {
    exception_handler.DeoptimizePartialFragmentFixup();
    // We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
    // be caller-saved. This has the downside that we cannot track incorrect register usage down the
    // line.
    return exception_handler.PrepareLongJump(/*smash_caller_saves=*/ false);
  }
}

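// Walks this thread's stack and returns the first non-runtime method, optionally reporting its
// dex pc through `dex_pc_out`.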
ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
                                    bool check_suspended,
                                    bool abort_on_error) const {
  // Note: this visitor may return with a method set, but dex_pc being dex::kDexNoIndex. This is
  // so we don't abort in a special situation (thinlocked monitor) when dumping the Java
  // stack.
  ArtMethod* method = nullptr;
  uint32_t dex_pc = dex::kDexNoIndex;
  StackVisitor::WalkStack(
      [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        ArtMethod* m = visitor->GetMethod();
        if (m->IsRuntimeMethod()) {
          // Continue if this is a runtime method.
          return true;
        }
        method = m;
        dex_pc = visitor->GetDexPc(abort_on_error);
        return false;
      },
      const_cast<Thread*>(this),
      /* context= */ nullptr,
      StackVisitor::StackWalkKind::kIncludeInlinedFrames,
      check_suspended);

  if (dex_pc_out != nullptr) {
    *dex_pc_out = dex_pc;
  }
  return method;
}

bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
  return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
}

extern std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

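// Stack visitor that reports every managed reference held in a frame (shadow, nterp, or quick)
// to the given RootVisitor, writing back any reference the visitor moved.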
// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
template <typename RootVisitor, bool kPrecise = false>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      // We are visiting the references in compiled frames, so we do not need
      // to know the inlined frames.
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        visitor_(visitor),
        visit_declaring_class_(!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {}

  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    if (false) {
      LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != nullptr) {
      VisitShadowFrame(shadow_frame);
    } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) {
      VisitNterpFrame();
    } else {
      VisitQuickFrame();
    }
    return true;
  }

  void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = shadow_frame->GetMethod();
    VisitDeclaringClass(m);
    DCHECK(m != nullptr);
    size_t num_regs = shadow_frame->NumberOfVRegs();
    // Handle scope for JNI or references for the interpreter.
    for (size_t reg = 0; reg < num_regs; ++reg) {
      mirror::Object* ref = shadow_frame->GetVRegReference(reg);
      if (ref != nullptr) {
        mirror::Object* new_ref = ref;
        visitor_(&new_ref, reg, this);
        if (new_ref != ref) {
          shadow_frame->SetVRegReference(reg, new_ref);
        }
      }
    }
    // Mark lock count map required for structured locking checks.
    shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
  }

 private:
  // Visiting the declaring class is necessary so that we don't unload the class of a method that
  // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
  // the threads do not all hold the heap bitmap lock for parallel GC.
  void VisitDeclaringClass(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS {
    if (!visit_declaring_class_) {
      return;
    }
    ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
    // klass can be null for runtime methods.
    if (klass != nullptr) {
      if (kVerifyImageObjectsMarked) {
        gc::Heap* const heap = Runtime::Current()->GetHeap();
        gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
                                                                                /*fail_ok=*/true);
        if (space != nullptr && space->IsImageSpace()) {
          bool failed = false;
          if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
          } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
          }
          if (failed) {
            GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
            space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
            LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
                                     << " klass@" << klass.Ptr();
            // Pretty info last in case it crashes.
            LOG(FATAL) << "Method " << method->PrettyMethod() << " klass "
                       << klass->PrettyClass();
          }
        }
      }
      mirror::Object* new_ref = klass.Ptr();
      visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this);
      if (new_ref != klass) {
        method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
      }
    }
  }

  void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
    StackReference<mirror::Object>* vreg_ref_base =
        reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame));
    StackReference<mirror::Object>* vreg_int_base =
        reinterpret_cast<StackReference<mirror::Object>*>(NterpGetRegistersArray(cur_quick_frame));
    CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData());
    const uint16_t num_regs = accessor.RegistersSize();
    // An nterp frame has two arrays: a dex register array and a reference array
    // that shadows the dex register array but contains only references
    // (non-reference dex registers hold nulls). See nterp_helpers.cc.
    for (size_t reg = 0; reg < num_regs; ++reg) {
      StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg;
      mirror::Object* ref = ref_addr->AsMirrorPtr();
      if (ref != nullptr) {
        mirror::Object* new_ref = ref;
        visitor_(&new_ref, reg, this);
        if (new_ref != ref) {
          ref_addr->Assign(new_ref);
          StackReference<mirror::Object>* int_addr = vreg_int_base + reg;
          int_addr->Assign(new_ref);
        }
      }
    }
  }

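  // Shared implementation for quick (compiled) frames. `T` supplies the VisitStack/VisitRegister
  // callbacks that attribute each reference either to a precise dex register (StackMapVRegInfo)
  // or to an imprecise vreg (UndefinedVRegInfo). It handles three cases: native methods
  // (arguments spilled to the caller's reserved vregs), optimized compiled methods (via stack
  // maps), and proxy methods (reference arguments).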
  template <typename T>
  ALWAYS_INLINE
  inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
    DCHECK(cur_quick_frame != nullptr);
    ArtMethod* m = *cur_quick_frame;
    VisitDeclaringClass(m);

    if (m->IsNative()) {
      // TODO: Spill the `this` reference in the AOT-compiled String.charAt()
      // slow-path for throwing SIOOBE, so that we can remove this carve-out.
      if (UNLIKELY(m->IsIntrinsic()) && m->GetIntrinsic() == Intrinsics::kStringCharAt) {
        // The String.charAt() method is AOT-compiled with an intrinsic implementation
        // instead of a JNI stub. It has a slow path that constructs a runtime frame
        // for throwing SIOOBE and in that path we do not get the `this` pointer
        // spilled on the stack, so there is nothing to visit. We can distinguish
        // this from the GenericJni path by checking that the PC is in the boot image
        // (PC shall be known thanks to the runtime frame for throwing SIOOBE).
        // Note that JIT does not emit that intrinsic implementation.
        const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
        if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
          return;
        }
      }
      // Native methods spill their arguments to the reserved vregs in the caller's frame
      // and use pointers to these stack references as jobject, jclass, jarray, etc.
      // Note: We can come here for a @CriticalNative method when it needs to resolve the
      // target native function but there would be no references to visit below.
      const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      const size_t method_pointer_size = static_cast<size_t>(kRuntimePointerSize);
      uint32_t* current_vreg = reinterpret_cast<uint32_t*>(
          reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size + method_pointer_size);
      auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
        auto* ref_addr = reinterpret_cast<StackReference<mirror::Object>*>(current_vreg);
        mirror::Object* ref = ref_addr->AsMirrorPtr();
        if (ref != nullptr) {
          mirror::Object* new_ref = ref;
          visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this);
          if (ref != new_ref) {
            ref_addr->Assign(new_ref);
          }
        }
      };
      const char* shorty = m->GetShorty();
      if (!m->IsStatic()) {
        visit();
        current_vreg += 1u;
      }
      for (shorty += 1u; *shorty != 0; ++shorty) {
        switch (*shorty) {
          case 'D':
          case 'J':
            current_vreg += 2u;
            break;
          case 'L':
            visit();
            FALLTHROUGH_INTENDED;
          default:
            current_vreg += 1u;
            break;
        }
      }
    } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
      // Process register map (which native, runtime and proxy methods don't have).
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      DCHECK(method_header->IsOptimized());
      StackReference<mirror::Object>* vreg_base =
          reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
      uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
      CodeInfo code_info = kPrecise
          ? CodeInfo(method_header)  // We will need dex register maps.
          : CodeInfo::DecodeGcMasksOnly(method_header);
      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
      DCHECK(map.IsValid());

      T vreg_info(m, code_info, map, visitor_);

      // Visit stack entries that hold pointers.
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
      for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) {
        if (stack_mask.LoadBit(i)) {
          StackReference<mirror::Object>* ref_addr = vreg_base + i;
          mirror::Object* ref = ref_addr->AsMirrorPtr();
          if (ref != nullptr) {
            mirror::Object* new_ref = ref;
            vreg_info.VisitStack(&new_ref, i, this);
            if (ref != new_ref) {
              ref_addr->Assign(new_ref);
            }
          }
        }
      }
      // Visit callee-save registers that hold pointers.
      uint32_t register_mask = code_info.GetRegisterMaskOf(map);
      for (uint32_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
        if (register_mask & (1 << i)) {
          mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
          if (kIsDebugBuild && ref_addr == nullptr) {
            std::string thread_name;
            GetThread()->GetThreadName(thread_name);
            LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
            DescribeStack(GetThread());
            LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
                       << "set in register_mask=" << register_mask << " at " << DescribeLocation();
          }
          if (*ref_addr != nullptr) {
            vreg_info.VisitRegister(ref_addr, i, this);
          }
        }
      }
    } else if (!m->IsRuntimeMethod() && m->IsProxyMethod()) {
      // If this is a proxy method, visit its reference arguments.
      DCHECK(!m->IsStatic());
      DCHECK(!m->IsNative());
      std::vector<StackReference<mirror::Object>*> ref_addrs =
          GetProxyReferenceArguments(cur_quick_frame);
      for (StackReference<mirror::Object>* ref_addr : ref_addrs) {
        mirror::Object* ref = ref_addr->AsMirrorPtr();
        if (ref != nullptr) {
          mirror::Object* new_ref = ref;
          visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this);
          if (ref != new_ref) {
            ref_addr->Assign(new_ref);
          }
        }
      }
    }
  }

  void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kPrecise) {
      VisitQuickFramePrecise();
    } else {
      VisitQuickFrameNonPrecise();
    }
  }

  void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct UndefinedVRegInfo {
      UndefinedVRegInfo([[maybe_unused]] ArtMethod* method,
                        [[maybe_unused]] const CodeInfo& code_info,
                        [[maybe_unused]] const StackMap& map,
                        RootVisitor& _visitor)
          : visitor(_visitor) {}

      ALWAYS_INLINE
      void VisitStack(mirror::Object** ref,
                      [[maybe_unused]] size_t stack_index,
                      const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
      }

      ALWAYS_INLINE
      void VisitRegister(mirror::Object** ref,
                         [[maybe_unused]] size_t register_index,
                         const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
      }

      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
  }

  void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct StackMapVRegInfo {
      StackMapVRegInfo(ArtMethod* method,
                       const CodeInfo& _code_info,
                       const StackMap& map,
                       RootVisitor& _visitor)
          : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
            code_info(_code_info),
            dex_register_map(code_info.GetDexRegisterMapOf(map)),
            visitor(_visitor) {
        DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
      }

      // TODO: If necessary, we should consider caching a reverse map instead of the linear
      // lookups for each location.
      void FindWithType(const size_t index,
                        const DexRegisterLocation::Kind kind,
                        mirror::Object** ref,
                        const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        bool found = false;
        for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
          DexRegisterLocation location = dex_register_map[dex_reg];
          if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
            visitor(ref, dex_reg, stack_visitor);
            found = true;
          }
        }

        if (!found) {
          // If nothing found, report with unknown.
          visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor);
        }
      }

      void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        const size_t stack_offset = stack_index * kFrameSlotSize;
        FindWithType(stack_offset,
                     DexRegisterLocation::Kind::kInStack,
                     ref,
                     stack_visitor);
      }

      void VisitRegister(mirror::Object** ref,
                         size_t register_index,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        FindWithType(register_index,
                     DexRegisterLocation::Kind::kInRegister,
                     ref,
                     stack_visitor);
      }

      size_t number_of_dex_registers;
      const CodeInfo& code_info;
      DexRegisterMap dex_register_map;
      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
  }

  // Visitor for when we visit a root.
  RootVisitor& visitor_;
  bool visit_declaring_class_;
};

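// Adapts a RootVisitor to the (object, vreg, stack visitor) callback shape that
// ReferenceMapVisitor expects, tagging each root with the owning thread's id.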
class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}

  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
  }

 private:
  RootVisitor* const visitor_;
  const uint32_t tid_;
};

void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
  for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
       brhs != nullptr;
       brhs = brhs->GetLink()) {
    brhs->VisitTargets(visitor);
  }
}

// FIXME: clang-r433403 reports the below function exceeds frame size limit.
// http://b/197647048
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
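// When `kPrecise` is true, dex register maps are decoded so each stack root can be attributed to
// its exact vreg; otherwise only the GC masks are decoded and roots are reported as imprecise.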
template <bool kPrecise>
void Thread::VisitRoots(RootVisitor* visitor) {
  const uint32_t thread_id = GetThreadId();
  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  if (tlsPtr_.async_exception != nullptr) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.async_exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
  tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id));
  tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
  HandleScopeVisitRoots(visitor, thread_id);
  // Visit roots for deoptimization.
  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
         record != nullptr;
         record = record->GetLink()) {
      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
           shadow_frame != nullptr;
           shadow_frame = shadow_frame->GetLink()) {
        mapper.VisitShadowFrame(shadow_frame);
      }
    }
  }
  for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
       record != nullptr;
       record = record->GetLink()) {
    if (record->IsReference()) {
      visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
                                  RootInfo(kRootThreadObject, thread_id));
    }
    visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
                                RootInfo(kRootThreadObject, thread_id));
  }
  if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
         record != nullptr;
         record = record->GetNext()) {
      mapper.VisitShadowFrame(record->GetShadowFrame());
    }
  }
  // Visit roots on this thread's stack
  RuntimeContextType context;
  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
  ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
  mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
}
#pragma GCC diagnostic pop

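// Sweeps a single interpreter-cache entry. For opcodes that cache a class or string pointer, the
// cached value is updated to the moved object, or (for classes) replaced by the weak-class
// sentinel if the class was not marked. Entries for opcodes that cache non-reference values are
// left untouched.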
static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, size_t* value)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (inst == nullptr) {
    return;
  }
  using Opcode = Instruction::Code;
  Opcode opcode = inst->Opcode();
  switch (opcode) {
    case Opcode::NEW_INSTANCE:
    case Opcode::CHECK_CAST:
    case Opcode::INSTANCE_OF:
    case Opcode::NEW_ARRAY:
    case Opcode::CONST_CLASS: {
      mirror::Class* klass = reinterpret_cast<mirror::Class*>(*value);
      if (klass == nullptr || klass == Runtime::GetWeakClassSentinel()) {
        return;
      }
      mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
      if (new_klass == nullptr) {
        *value = reinterpret_cast<size_t>(Runtime::GetWeakClassSentinel());
      } else if (new_klass != klass) {
        *value = reinterpret_cast<size_t>(new_klass);
      }
      return;
    }
    case Opcode::CONST_STRING:
    case Opcode::CONST_STRING_JUMBO: {
      mirror::Object* object = reinterpret_cast<mirror::Object*>(*value);
      if (object == nullptr) {
        return;
      }
      mirror::Object* new_object = visitor->IsMarked(object);
      // We know the string is marked because it's a strongly-interned string that
      // is always alive (see b/117621117 for trying to make those strings weak).
      if (kIsDebugBuild && new_object == nullptr) {
        // (b/275005060) Currently the problem is reported only on CC GC.
        // Therefore we log it with more information. But since the failure rate
        // is quite high, sampling it.
        if (gUseReadBarrier) {
          Runtime* runtime = Runtime::Current();
          gc::collector::ConcurrentCopying* cc = runtime->GetHeap()->ConcurrentCopyingCollector();
          CHECK_NE(cc, nullptr);
          LOG(FATAL) << cc->DumpReferenceInfo(object, "string")
                     << " string interned: " << std::boolalpha
                     << runtime->GetInternTable()->LookupStrong(Thread::Current(),
                                                                down_cast<mirror::String*>(object))
                     << std::noboolalpha;
        } else {
          // Other GCs
          LOG(FATAL) << __FUNCTION__
                     << ": IsMarked returned null for a strongly interned string: " << object;
        }
      } else if (new_object != object) {
        *value = reinterpret_cast<size_t>(new_object);
      }
      return;
    }
    default:
      // The following opcode ranges store non-reference values.
      if ((Opcode::IGET <= opcode && opcode <= Opcode::SPUT_SHORT) ||
          (Opcode::INVOKE_VIRTUAL <= opcode && opcode <= Opcode::INVOKE_INTERFACE_RANGE)) {
        return;  // Nothing to do for the GC.
      }
      // New opcode is using the cache. We need to explicitly handle it in this method.
      DCHECK(false) << "Unhandled opcode " << inst->Opcode();
  }
}

void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
  for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
    SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
  }
}

// FIXME: clang-r433403 reports the below function exceeds frame size limit.
// http://b/197647048
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
    VisitRoots</* kPrecise= */ true>(visitor);
  } else {
    VisitRoots</* kPrecise= */ false>(visitor);
  }
}
#pragma GCC diagnostic pop

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, [[maybe_unused]] const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyObject(root);
  }
};

void Thread::VerifyStackImpl() {
  if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
    VerifyRootVisitor visitor;
    std::unique_ptr<Context> context(Context::Create());
    RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
    mapper.WalkStack();
  }
}

void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
  DCHECK_LE(start, end);
  DCHECK_LE(end, limit);
  tlsPtr_.thread_local_start = start;
  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
  tlsPtr_.thread_local_end = end;
  tlsPtr_.thread_local_limit = limit;
  tlsPtr_.thread_local_objects = 0;
}

void Thread::ResetTlab() {
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  if (heap->GetHeapSampler().IsEnabled()) {
    // Note: We always ResetTlab before SetTlab, therefore we can do the sample
    // offset adjustment here.
    heap->AdjustSampleOffset(GetTlabPosOffset());
    VLOG(heap) << "JHP: ResetTlab, Tid: " << GetTid()
               << " adjustment = "
               << (tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start);
  }
  SetTlab(nullptr, nullptr, nullptr);
}

bool Thread::HasTlab() const {
  const bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
  if (has_tlab) {
    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
  } else {
    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
  }
  return has_tlab;
}

void Thread::AdjustTlab(size_t slide_bytes) {
  if (HasTlab()) {
    tlsPtr_.thread_local_start -= slide_bytes;
    tlsPtr_.thread_local_pos -= slide_bytes;
    tlsPtr_.thread_local_end -= slide_bytes;
    tlsPtr_.thread_local_limit -= slide_bytes;
  }
}

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  thread.ShortDump(os);
  return os;
}

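// Protects the guard region below the usable stack with PROT_NONE so that implicit stack-overflow
// checks (a fault on touching this region) can be detected; UnprotectStack undoes this.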
template <StackType stack_type>
bool Thread::ProtectStack(bool fatal_on_error) {
  void* pregion = GetStackBegin<stack_type>() - GetStackOverflowProtectedSize();
  VLOG(threads) << "Protecting stack at " << pregion;
  if (mprotect(pregion, GetStackOverflowProtectedSize(), PROT_NONE) == -1) {
    if (fatal_on_error) {
      // b/249586057, LOG(FATAL) times out
      LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. "
                    "Reason: "
                 << strerror(errno) << " size: " << GetStackOverflowProtectedSize();
      exit(1);
    }
    return false;
  }
  return true;
}

template <StackType stack_type>
bool Thread::UnprotectStack() {
  void* pregion = GetStackBegin<stack_type>() - GetStackOverflowProtectedSize();
  VLOG(threads) << "Unprotecting stack at " << pregion;
  return mprotect(pregion, GetStackOverflowProtectedSize(), PROT_READ|PROT_WRITE) == 0;
}

size_t Thread::NumberOfHeldMutexes() const {
  size_t count = 0;
  for (BaseMutex* mu : tlsPtr_.held_mutexes) {
    count += mu != nullptr ? 1 : 0;
  }
  return count;
}

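// Called when the deoptimization-sentinel exception is pending: pops the deoptimization context,
// restores any genuinely pending exception, and re-executes the deoptimized frames in the
// interpreter, leaving the return value in `result`.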
void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
  DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
  ClearException();
  ObjPtr<mirror::Throwable> pending_exception;
  bool from_code = false;
  DeoptimizationMethodType method_type;
  PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type);
  SetTopOfStack(nullptr);

  // Restore the exception that was pending before deoptimization then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    SetException(pending_exception);
  }

  ShadowFrame* shadow_frame = MaybePopDeoptimizedStackedShadowFrame();
  // We may not have a shadow frame if we deoptimized at the return of the
  // quick_to_interpreter_bridge which got directly called by art_quick_invoke_stub.
  if (shadow_frame != nullptr) {
    SetTopOfShadowStack(shadow_frame);
    interpreter::EnterInterpreterFromDeoptimize(this,
                                                shadow_frame,
                                                result,
                                                from_code,
                                                method_type);
  }
}

void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
  CHECK(new_exception != nullptr);
  Runtime::Current()->SetAsyncExceptionsThrown();
  if (kIsDebugBuild) {
    // Make sure we are in a checkpoint.
    MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
    CHECK(this == Thread::Current() || GetSuspendCount() >= 1)
        << "It doesn't look like this was called in a checkpoint! this: "
        << this << " count: " << GetSuspendCount();
  }
  tlsPtr_.async_exception = new_exception.Ptr();
}

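// Promotes a previously delivered async exception (see SetAsyncException) to this thread's
// regular pending exception. Returns true if an async exception was installed, otherwise returns
// whether an ordinary exception is already pending.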
bool Thread::ObserveAsyncException() {
  DCHECK(this == Thread::Current());
  if (tlsPtr_.async_exception != nullptr) {
    if (tlsPtr_.exception != nullptr) {
      LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: "
                   << tlsPtr_.exception->Dump();
      LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump();
    }
    tlsPtr_.exception = tlsPtr_.async_exception;
    tlsPtr_.async_exception = nullptr;
    return true;
  } else {
    return IsExceptionPending();
  }
}

void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
  CHECK(new_exception != nullptr);
  // TODO: DCHECK(!IsExceptionPending());
  tlsPtr_.exception = new_exception.Ptr();
}

bool Thread::IsAotCompiler() {
  return Runtime::Current()->IsAotCompiler();
}

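// Returns the Java peer of a thread other than the caller. The target must be suspended (or be
// the caller itself); any pending thread flip is completed first so that the returned reference
// is not stale.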
mirror::Object* Thread::GetPeerFromOtherThread() {
  Thread* self = Thread::Current();
  if (this == self) {
    // We often call this on every thread, including ourselves.
    return GetPeer();
  }
  // If "this" thread is not suspended, it could disappear.
  DCHECK(IsSuspended()) << *this;
  DCHECK(tlsPtr_.jpeer == nullptr);
  // Some JVMTI code may unfortunately hold thread_list_lock_, but if it does, it should hold the
  // mutator lock in exclusive mode, and we should not have a pending flip function.
  if (kIsDebugBuild && Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    CHECK(!ReadFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_relaxed));
  }
  // Ensure that opeer is not obsolete.
  EnsureFlipFunctionStarted(self, this);
  if (ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire)) {
    // Does not release mutator lock. Hence no new flip requests can be issued.
    WaitForFlipFunction(self);
  }
  return tlsPtr_.opeer;
}

mirror::Object* Thread::LockedGetPeerFromOtherThread(ThreadExitFlag* tef) {
  DCHECK(tlsPtr_.jpeer == nullptr);
  Thread* self = Thread::Current();
  Locks::thread_list_lock_->AssertHeld(self);
  // memory_order_relaxed is OK here, because we recheck it later with acquire order.
  if (ReadFlag(ThreadFlag::kPendingFlipFunction, std::memory_order_relaxed)) {
    // It is unsafe to call EnsureFlipFunctionStarted with thread_list_lock_. Thus we temporarily
    // release it, taking care to handle the case in which "this" thread disappears while we no
    // longer hold it.
    Locks::thread_list_lock_->Unlock(self);
    EnsureFlipFunctionStarted(self, this, StateAndFlags(0), tef);
    Locks::thread_list_lock_->Lock(self);
    if (tef->HasExited()) {
      return nullptr;
    }
  }
  if (ReadFlag(ThreadFlag::kRunningFlipFunction, std::memory_order_acquire)) {
    // Does not release mutator lock. Hence no new flip requests can be issued.
    WaitForFlipFunction(self);
  }
  return tlsPtr_.opeer;
}

void Thread::SetReadBarrierEntrypoints() {
  // Make sure entrypoints aren't null.
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true);
}

void Thread::ClearAllInterpreterCaches() {
  static struct ClearInterpreterCacheClosure : Closure {
    void Run(Thread* thread) override {
      thread->GetInterpreterCache()->Clear(thread);
    }
  } closure;
  Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
}

void Thread::SetNativePriority(int new_priority) {
  palette_status_t status = PaletteSchedSetPriority(GetTid(), new_priority);
  CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
}

int Thread::GetNativePriority() const {
  int priority = 0;
  palette_status_t status = PaletteSchedGetPriority(GetTid(), &priority);
  CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
  return priority;
}

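// Aborts the process from this (culprit) thread by sending it SIGABRT, so that the abort message
// and signal point at the offending thread rather than at the caller.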
void Thread::AbortInThis(const std::string& message) {
  std::string thread_name;
  Thread::Current()->GetThreadName(thread_name);
  LOG(ERROR) << message;
  LOG(ERROR) << "Aborting culprit thread";
  Runtime::Current()->SetAbortMessage(("Caused " + thread_name + " failure : " + message).c_str());
  // Unlike Runtime::Abort() we do not fflush(nullptr), since we want to send the signal with as
  // little delay as possible.
  int res = pthread_kill(tlsPtr_.pthread_self, SIGABRT);
  if (res != 0) {
    LOG(ERROR) << "pthread_kill failed with " << res << " " << strerror(res) << " target was "
               << tls32_.tid;
  } else {
    // Wait for our process to be aborted.
    sleep(10 /* seconds */);
  }
  // The process should have died long before we got here. Never return.
  LOG(FATAL) << "Failed to abort in culprit thread: " << message;
  UNREACHABLE();
}

bool Thread::IsSystemDaemon() const {
  if (GetPeer() == nullptr) {
    return false;
  }
  return WellKnownClasses::java_lang_Thread_systemDaemon->GetBoolean(GetPeer());
}

std::string Thread::StateAndFlagsAsHexString() const {
  std::stringstream result_stream;
  result_stream << std::hex << GetStateAndFlags(std::memory_order_relaxed).GetValue();
  return result_stream.str();
}

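// RAII helper: stashes and clears this thread's pending exception on construction and restores
// the stored exception (if any) on destruction.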
ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
    : self_(self), hs_(self_), excp_(hs_.NewHandle<art::mirror::Throwable>(self_->GetException())) {
  self_->ClearException();
}

void ScopedExceptionStorage::SuppressOldException(const char* message) {
  CHECK(self_->IsExceptionPending()) << *self_;
  ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
  excp_.Assign(self_->GetException());
  if (old_suppressed != nullptr) {
    LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
  }
  self_->ClearException();
}

ScopedExceptionStorage::~ScopedExceptionStorage() {
  CHECK(!self_->IsExceptionPending()) << *self_;
  if (!excp_.IsNull()) {
    self_->SetException(excp_.Get());
  }
}

}  // namespace art

#pragma clang diagnostic pop  // -Wconversion