/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/hex_dump.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "managed_stack.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

static constexpr bool kDebugStackWalk = false;

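// A stack walk is driven by subclassing StackVisitor and overriding VisitFrame().
// Illustrative sketch only (it mirrors NumFramesVisitor and DescribeStackVisitor
// below; it is not itself part of the runtime):
//
//   struct CountingVisitor : public StackVisitor {
//     explicit CountingVisitor(Thread* thread)
//         : StackVisitor(thread, nullptr, StackWalkKind::kIncludeInlinedFrames) {}
//     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
//       ++count;
//       return true;  // Returning false would stop the walk early.
//     }
//     size_t count = 0;
//   };
//
//   CountingVisitor visitor(Thread::Current());
//   visitor.WalkStack(/* include_transitions= */ false);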
StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           bool check_suspended)
    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames,
                           bool check_suspended)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      context_(context),
      check_suspended_(check_suspended) {
  if (check_suspended_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
  }
}

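// Returns the method of the frame we are currently at: from the shadow frame when
// interpreting, via the inline info in the stack map when inside an inlined frame,
// and otherwise the ArtMethod* stored at the base of the quick frame.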
ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      CodeInfo code_info(method_header);
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      return GetResolvedMethod(*GetCurrentQuickFrame(), code_info, current_inline_frames_);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

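// Returns the dex pc of the current frame. For compiled frames this maps the native
// pc back through the method header; dex::kDexNoIndex is returned when there is no
// method header to map with.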
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      return current_inline_frames_.back().GetDexPc();
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return dex::kDexNoIndex;
    } else {
      return cur_oat_quick_method_header_->ToDexPc(
          GetMethod(), cur_quick_frame_pc_, abort_on_failure);
    }
  } else {
    return 0;
  }
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

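// Returns the 'this' reference of the current frame, or null for static methods.
// For native methods it is the first HandleScope entry; for proxy methods it is
// recovered by the runtime; otherwise it is read from the first input register,
// i.e. vreg (registers_size - ins_size) of the code item.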
mirror::Object* StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs = reinterpret_cast<HandleScope*>(
          reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    CodeItemDataAccessor accessor(m->DexInstructionData());
    if (!accessor.HasCodeItem()) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
          << ArtMethod::PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
      uint32_t value = 0;
      if (!GetVReg(m, reg, kReferenceVReg, &value)) {
        return nullptr;
      }
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

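// Reads a vreg from the debugger-installed shadow frame, if the debugger has
// overridden its value for this frame id. Returns false when no debugger value
// is set, in which case callers fall back to the compiled frame's stack map.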
bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegFromOptimizedCode(m, vreg, kind, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

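// Reads a vreg of an optimized (compiled) frame by decoding its location from the
// stack map: a stack slot relative to the frame, a machine register, or an encoded
// constant. Returns false when the vreg has no location, or when a reference is
// requested from a location that cannot hold one.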
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  DCHECK_EQ(m, GetMethod());
  // Can't be null or how would we compile its instructions?
  DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  CodeItemDataAccessor accessor(m->DexInstructionData());
  uint16_t number_of_dex_registers = accessor.RegistersSize();
  DCHECK_LT(vreg, number_of_dex_registers);
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info(method_header);

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  DCHECK(stack_map.IsValid());

  DexRegisterMap dex_register_map = IsInInlinedFrame()
      ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
      : code_info.GetDexRegisterMapOf(stack_map);
  if (dex_register_map.empty()) {
    return false;
  }
  DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
  DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
      if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
        return false;
      }
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister: {
      uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
        return false;
      }
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      if (kind == kReferenceVReg) {
        return false;
      }
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant: {
      uint32_t result = dex_register_map[vreg].GetConstant();
      if (kind == kReferenceVReg && result != 0) {
        return false;
      }
      *val = result;
      return true;
    }
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
      UNREACHABLE();
  }
}

bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);

  if (kRuntimeISA == InstructionSet::kX86 && is_float) {
    // X86 float registers are 64-bit and each XMM register is provided as two separate
    // 32-bit registers by the context.
    reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
  }

  // MIPS32 float registers are used as 64-bit (for MIPS32r2 it is pair
  // F(2n)-F(2n+1), and for MIPS32r6 it is 64-bit register F(2n)). When
  // accessing upper 32-bits from double, reg + 1 should be used.
  if ((kRuntimeISA == InstructionSet::kMips) && (kind == kDoubleHiVReg)) {
    DCHECK_ALIGNED(reg, 2);
    reg++;
  }

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

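// Writes a vreg of the current frame. Interpreted frames are updated in place;
// for compiled frames a debugger shadow frame is created (or reused) and will
// replace the compiled frame when the stack is deoptimized.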
bool StackVisitor::SetVReg(ArtMethod* m,
                           uint16_t vreg,
                           uint32_t new_value,
                           VRegKind kind) {
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = accessor.RegistersSize();
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
  }
  if (kind == kReferenceVReg) {
    shadow_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(new_value));
  } else {
    shadow_frame->SetVReg(vreg, new_value);
  }
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare for deoptimization (see SetVRegFromDebugger).
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = accessor.RegistersSize();
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg pair has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

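// The return PC of a quick frame is spilled at a fixed offset from the frame's SP,
// given by the frame info of the current method.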
uintptr_t StackVisitor::GetReturnPc() const {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  DCHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  CHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

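// Counts the frames on the thread's stack by walking it with a minimal visitor.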
size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() override {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

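// Locates the caller of the frame at the current height: re-walks the stack from
// the top and reports the first non-runtime method past that frame.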
bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += m->PrettyMethod();
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

void StackVisitor::SetMethod(ArtMethod* method) {
  DCHECK(GetMethod() != nullptr);
  if (cur_shadow_frame_ != nullptr) {
    cur_shadow_frame_->SetMethod(method);
  } else {
    DCHECK(cur_quick_frame_ != nullptr);
    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
                               << GetMethod()->PrettyMethod() << " is inlined into "
                               << GetOuterMethod()->PrettyMethod();
    *cur_quick_frame_ = method;
  }
}

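// Debug-build check that `pc` lies within the method's quick code, after excluding
// native/runtime/proxy methods, instrumentation entry/exit points, JIT code, and
// the various resolution and interpreter-bridge stubs.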
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
    return;
  }

  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
    return;
  }

  Runtime* runtime = Runtime::Current();
  if (runtime->UseJitCompilation() &&
      runtime->GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(pc))) {
    return;
  }

  const void* code = method->GetEntryPointFromQuickCompiledCode();
  if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
    return;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  if (class_linker->IsQuickToInterpreterBridge(code) ||
      class_linker->IsQuickResolutionStub(code)) {
    return;
  }

  if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
    return;
  }

  uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->GetCodeSize();
  uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
  CHECK(code_start <= pc && pc <= (code_start + code_size))
      << method->PrettyMethod()
      << " pc=" << std::hex << pc
      << " code_start=" << code_start
      << " code_size=" << code_size;
}

void StackVisitor::SanityCheckFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
    // Runtime methods have null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    Runtime* const runtime = Runtime::Current();
    LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
    if (!linear_alloc->Contains(method)) {
      // Check class linker linear allocs.
      // We get the canonical method as copied methods may have their declaring
      // class from another class loader.
      ArtMethod* canonical = method->GetCanonicalMethod();
      ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
      LinearAlloc* const class_linear_alloc = (klass != nullptr)
          ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
          : linear_alloc;
      if (!class_linear_alloc->Contains(canonical)) {
        // Check image space.
        bool in_image = false;
        for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
          if (space->IsImageSpace()) {
            auto* image_space = space->AsImageSpace();
            const auto& header = image_space->GetImageHeader();
            const ImageSection& methods = header.GetMethodsSection();
            const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
            const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
            if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
              in_image = true;
              break;
            }
          }
        }
        CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
      }
    }
    if (cur_quick_frame_ != nullptr) {
      AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
      // Frame sanity.
      size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // A rough guess at an upper size we expect to see for a frame.
      // 256 registers
      // 2 words HandleScope overhead
      // 3+3 register spills
      // TODO: this seems architecture specific for the case of JNI frames.
      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = 2 * KB;
      CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
      size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

// Counts the number of references in the parameter list of the corresponding method.
// Note: This does _not_ include "this" for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint32_t shorty_len;
  const char* shorty = method->GetShorty(&shorty_len);
  uint32_t refs = 0;
  // Skip index 0: the shorty starts with the return type.
  for (uint32_t i = 1; i < shorty_len; ++i) {
    if (shorty[i] == 'L') {
      refs++;
    }
  }
  return refs;
}

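// Returns the frame info of the current quick frame. When there is no method header
// (stubs and the generic JNI trampoline), the layout is reconstructed: from the
// callee-save configuration for abstract and proxy methods, from the runtime method
// table for runtime methods, and from the handle-scope size for generic JNI frames.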
QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    return cur_oat_quick_method_header_->GetFrameInfo();
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method of a proxy class: the constructor. A direct method
    // is cloned from the original java.lang.reflect.Proxy class and is executed like any
    // other quick compiled method, without any stubs. Therefore the method must have an
    // OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have an OatQuickMethodHeader";
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // The only remaining case is if the method is native and uses the generic JNI stub,
  // called either directly or through some (resolution, instrumentation) trampoline.
  DCHECK(method->IsNative());
  if (kIsDebugBuild) {
    ClassLinker* class_linker = runtime->GetClassLinker();
    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
                                                                             kRuntimePointerSize);
    CHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
          // The current entrypoint (after filtering out trampolines) may have changed
          // from GenericJNI to JIT-compiled stub since we have entered this frame.
          (runtime->GetJit() != nullptr &&
           runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
  }
  // Generic JNI frame.
  uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
  size_t scope_size = HandleScope::SizeOf(handle_refs);
  constexpr QuickMethodFrameInfo callee_info =
      RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);

  // Callee saves + handle scope + method ref + alignment.
  // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
  size_t frame_size = RoundUp(
      callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
      kStackAlignment);
  return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}

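// Walks the thread's managed stack, fragment by fragment. Quick frames are unwound
// by reading the spilled return PC and advancing SP by the frame size from the frame
// info; inlined frames are synthesized from the stack maps; return PCs hijacked by
// the instrumentation exit stub are restored from the thread's instrumentation stack.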
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
  if (check_suspended_) {
    DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  }
  CHECK_EQ(cur_depth_, 0U);
  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
  uint32_t instrumentation_stack_depth = 0;
  size_t inlined_frames_count = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    cur_oat_quick_method_header_ = nullptr;

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      DCHECK(method != nullptr);
      bool header_retrieved = false;
      if (method->IsNative()) {
        // We do not have a PC for the first frame, so we cannot simply use
        // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
        // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
        // changed since the frame was entered. The top quick frame tag indicates
        // GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
        if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
          // The generic JNI does not have any method header.
          cur_oat_quick_method_header_ = nullptr;
        } else {
          const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
          CHECK(existing_entry_point != nullptr);
          Runtime* runtime = Runtime::Current();
          ClassLinker* class_linker = runtime->GetClassLinker();
          // Check whether we can quickly get the header from the current entrypoint.
          if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
              !class_linker->IsQuickResolutionStub(existing_entry_point) &&
              existing_entry_point != GetQuickInstrumentationEntryPoint()) {
            cur_oat_quick_method_header_ =
                OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
          } else {
            const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
            if (code != nullptr) {
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
            } else {
              // This must be a JITted JNI stub frame.
              CHECK(runtime->GetJit() != nullptr);
              code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
              CHECK(code != nullptr) << method->PrettyMethod();
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
            }
          }
        }
        header_retrieved = true;
      }
      while (method != nullptr) {
        if (!header_retrieved) {
          cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        }
        header_retrieved = false;  // Force header retrieval in next iteration.
        SanityCheckFrame();

        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()
            // JNI methods cannot have any inlined frames.
            && !method->IsNative()) {
          DCHECK_NE(cur_quick_frame_pc_, 0u);
          current_code_info_ = CodeInfo(cur_oat_quick_method_header_,
                                        CodeInfo::DecodeFlags::InlineInfoOnly);
          uint32_t native_pc_offset =
              cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
          StackMap stack_map = current_code_info_.GetStackMapForNativePcOffset(native_pc_offset);
          if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
            DCHECK_EQ(current_inline_frames_.size(), 0u);
            for (current_inline_frames_ = current_code_info_.GetInlineInfosOf(stack_map);
                 !current_inline_frames_.empty();
                 current_inline_frames_.pop_back()) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
              inlined_frames_count++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        size_t return_pc_offset = frame_size - sizeof(void*);
        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);

        if (UNLIKELY(exit_stubs_installed ||
                     reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
            CHECK_LT(instrumentation_stack_depth, thread_->GetInstrumentationStack()->size());
            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                (*thread_->GetInstrumentationStack())[instrumentation_stack_depth];
            instrumentation_stack_depth++;
            if (GetMethod() ==
                Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
              // Skip runtime save all callee frames which are used to deliver exceptions.
            } else if (instrumentation_frame.interpreter_entry_) {
              ArtMethod* callee =
                  Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
              CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
                                            << " Found: " << ArtMethod::PrettyMethod(GetMethod());
            } else {
              // Instrumentation generally doesn't distinguish between a method's obsolete and
              // non-obsolete version.
              CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
                       GetMethod()->GetNonObsoleteMethod())
                  << "Expected: "
                  << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
                  << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
            }
            if (num_frames_ != 0) {
              // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
              // recursion.
              size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
                  thread_,
                  cur_depth_,
                  inlined_frames_count);
              CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
            }
            return_pc = instrumentation_frame.return_pc_;
          }
        }

        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
              << std::boolalpha
              << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                   cur_oat_quick_method_header_->IsOptimized())
              << " native=" << method->IsNative()
              << std::noboolalpha
              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
              << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
              << " next=" << *cur_quick_frame_;
        }

        if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
          cur_depth_++;
        }
        method = *cur_quick_frame_;
      }
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    if (kCount == CountTransitions::kYes) {
      cur_depth_++;
    }
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);

}  // namespace art