1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "stack.h"
18 #include <limits>
19 
20 #include "android-base/stringprintf.h"
21 
22 #include "arch/context.h"
23 #include "art_method-inl.h"
24 #include "base/callee_save_type.h"
25 #include "base/enums.h"
26 #include "base/hex_dump.h"
27 #include "dex/dex_file_types.h"
28 #include "entrypoints/entrypoint_utils-inl.h"
29 #include "entrypoints/quick/callee_save_frame.h"
30 #include "entrypoints/runtime_asm_entrypoints.h"
31 #include "gc/space/image_space.h"
32 #include "gc/space/space-inl.h"
33 #include "interpreter/shadow_frame-inl.h"
34 #include "jit/jit.h"
35 #include "jit/jit_code_cache.h"
36 #include "linear_alloc.h"
37 #include "managed_stack.h"
38 #include "mirror/class-inl.h"
39 #include "mirror/object-inl.h"
40 #include "mirror/object_array-inl.h"
41 #include "nterp_helpers.h"
42 #include "oat_quick_method_header.h"
43 #include "obj_ptr-inl.h"
44 #include "quick/quick_method_frame_info.h"
45 #include "runtime.h"
46 #include "thread.h"
47 #include "thread_list.h"
48 
49 namespace art {
50 
51 using android::base::StringPrintf;
52 
53 static constexpr bool kDebugStackWalk = false;
54 
55 StackVisitor::StackVisitor(Thread* thread,
56                            Context* context,
57                            StackWalkKind walk_kind,
58                            bool check_suspended)
59     : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
60 
61 StackVisitor::StackVisitor(Thread* thread,
62                            Context* context,
63                            StackWalkKind walk_kind,
64                            size_t num_frames,
65                            bool check_suspended)
66     : thread_(thread),
67       walk_kind_(walk_kind),
68       cur_shadow_frame_(nullptr),
69       cur_quick_frame_(nullptr),
70       cur_quick_frame_pc_(0),
71       cur_oat_quick_method_header_(nullptr),
72       num_frames_(num_frames),
73       cur_depth_(0),
74       cur_inline_info_(nullptr, CodeInfo()),
75       cur_stack_map_(0, StackMap()),
76       context_(context),
77       check_suspended_(check_suspended) {
78   if (check_suspended_) {
79     DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
80   }
81 }
82 
83 CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
84   DCHECK(!(*cur_quick_frame_)->IsNative());
85   const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
86   if (cur_inline_info_.first != header) {
87     cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
88   }
89   return &cur_inline_info_.second;
90 }
91 
92 StackMap* StackVisitor::GetCurrentStackMap() const {
93   DCHECK(!(*cur_quick_frame_)->IsNative());
94   const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
95   if (cur_stack_map_.first != cur_quick_frame_pc_) {
96     uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
97     cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
98                                     GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
99   }
100   return &cur_stack_map_.second;
101 }
102 
103 ArtMethod* StackVisitor::GetMethod() const {
104   if (cur_shadow_frame_ != nullptr) {
105     return cur_shadow_frame_->GetMethod();
106   } else if (cur_quick_frame_ != nullptr) {
107     if (IsInInlinedFrame()) {
108       CodeInfo* code_info = GetCurrentInlineInfo();
109       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
110       return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
111     } else {
112       return *cur_quick_frame_;
113     }
114   }
115   return nullptr;
116 }
117 
118 uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
119   if (cur_shadow_frame_ != nullptr) {
120     return cur_shadow_frame_->GetDexPC();
121   } else if (cur_quick_frame_ != nullptr) {
122     if (IsInInlinedFrame()) {
123       return current_inline_frames_.back().GetDexPc();
124     } else if (cur_oat_quick_method_header_ == nullptr) {
125       return dex::kDexNoIndex;
126     } else if ((*GetCurrentQuickFrame())->IsNative()) {
127       return cur_oat_quick_method_header_->ToDexPc(
128           GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
129     } else if (cur_oat_quick_method_header_->IsOptimized()) {
130       StackMap* stack_map = GetCurrentStackMap();
131       DCHECK(stack_map->IsValid());
132       return stack_map->GetDexPc();
133     } else {
134       DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
135       return NterpGetDexPC(cur_quick_frame_);
136     }
137   } else {
138     return 0;
139   }
140 }
141 
142 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
143     REQUIRES_SHARED(Locks::mutator_lock_);
144 
145 ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
146   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
147   ArtMethod* m = GetMethod();
148   if (m->IsStatic()) {
149     return nullptr;
150   } else if (m->IsNative()) {
151     if (cur_quick_frame_ != nullptr) {
152       HandleScope* hs;
153       if (cur_oat_quick_method_header_ != nullptr) {
154         hs = reinterpret_cast<HandleScope*>(
155             reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
156       } else {
157         // GenericJNI frames have the HandleScope under the managed frame.
158         uint32_t shorty_len;
159         const char* shorty = m->GetShorty(&shorty_len);
160         const size_t num_handle_scope_references =
161             /* this */ 1u + std::count(shorty + 1, shorty + shorty_len, 'L');
162         hs = GetGenericJniHandleScope(cur_quick_frame_, num_handle_scope_references);
163       }
164       return hs->GetReference(0);
165     } else {
166       return cur_shadow_frame_->GetVRegReference(0);
167     }
168   } else if (m->IsProxyMethod()) {
169     if (cur_quick_frame_ != nullptr) {
170       return artQuickGetProxyThisObject(cur_quick_frame_);
171     } else {
172       return cur_shadow_frame_->GetVRegReference(0);
173     }
174   } else {
175     CodeItemDataAccessor accessor(m->DexInstructionData());
176     if (!accessor.HasCodeItem()) {
177       UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
178           << ArtMethod::PrettyMethod(m);
179       return nullptr;
180     } else {
181       uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
182       uint32_t value = 0;
183       if (!GetVReg(m, reg, kReferenceVReg, &value)) {
184         return nullptr;
185       }
186       return reinterpret_cast<mirror::Object*>(value);
187     }
188   }
189 }
190 
191 size_t StackVisitor::GetNativePcOffset() const {
192   DCHECK(!IsShadowFrame());
193   return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
194 }
195 
196 bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
197                                                   VRegKind kind,
198                                                   uint32_t* val) const {
199   size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
200   ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
201   if (shadow_frame != nullptr) {
202     bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
203     DCHECK(updated_vreg_flags != nullptr);
204     if (updated_vreg_flags[vreg]) {
205       // Value is set by the debugger.
206       if (kind == kReferenceVReg) {
207         *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
208             shadow_frame->GetVRegReference(vreg)));
209       } else {
210         *val = shadow_frame->GetVReg(vreg);
211       }
212       return true;
213     }
214   }
215   // No value is set by the debugger.
216   return false;
217 }
218 
219 bool StackVisitor::GetVReg(ArtMethod* m,
220                            uint16_t vreg,
221                            VRegKind kind,
222                            uint32_t* val,
223                            std::optional<DexRegisterLocation> location) const {
224   if (cur_quick_frame_ != nullptr) {
225     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
226     DCHECK(m == GetMethod());
227     // Check if there is a value set by the debugger.
228     if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
229       return true;
230     }
231     bool result = false;
232     if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
233       result = true;
234       *val = (kind == kReferenceVReg)
235           ? NterpGetVRegReference(cur_quick_frame_, vreg)
236           : NterpGetVReg(cur_quick_frame_, vreg);
237     } else {
238       DCHECK(cur_oat_quick_method_header_->IsOptimized());
239       if (location.has_value() && kind != kReferenceVReg) {
240         uint32_t val2 = *val;
241         // The caller already knows the register location, so we can use the faster overload
242         // which does not decode the stack maps.
243         result = GetVRegFromOptimizedCode(location.value(), kind, val);
244         // Compare to the slower overload.
245         DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2));
246         DCHECK_EQ(*val, val2);
247       } else {
248         result = GetVRegFromOptimizedCode(m, vreg, kind, val);
249       }
250     }
251     if (kind == kReferenceVReg) {
252       // Perform a read barrier in case we are in a different thread and GC is ongoing.
253       mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
254       uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
255       DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
256       *val = static_cast<uint32_t>(ptr_out);
257     }
258     return result;
259   } else {
260     DCHECK(cur_shadow_frame_ != nullptr);
261     if (kind == kReferenceVReg) {
262       *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
263           cur_shadow_frame_->GetVRegReference(vreg)));
264     } else {
265       *val = cur_shadow_frame_->GetVReg(vreg);
266     }
267     return true;
268   }
269 }
270 
271 bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
272                                             uint32_t* val) const {
273   DCHECK_EQ(m, GetMethod());
274   // Can't be null or how would we compile its instructions?
275   DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
276   CodeItemDataAccessor accessor(m->DexInstructionData());
277   uint16_t number_of_dex_registers = accessor.RegistersSize();
278   DCHECK_LT(vreg, number_of_dex_registers);
279   const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
280   CodeInfo code_info(method_header);
281 
282   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
283   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
284   DCHECK(stack_map.IsValid());
285 
286   DexRegisterMap dex_register_map = IsInInlinedFrame()
287       ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
288       : code_info.GetDexRegisterMapOf(stack_map);
289   if (dex_register_map.empty()) {
290     return false;
291   }
292   DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
293   DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
294   switch (location_kind) {
295     case DexRegisterLocation::Kind::kInStack: {
296       const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
297       BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
298       if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
299         return false;
300       }
301       const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
302       *val = *reinterpret_cast<const uint32_t*>(addr);
303       return true;
304     }
305     case DexRegisterLocation::Kind::kInRegister: {
306       uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
307       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
308       if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
309         return false;
310       }
311       return GetRegisterIfAccessible(reg, kind, val);
312     }
313     case DexRegisterLocation::Kind::kInRegisterHigh:
314     case DexRegisterLocation::Kind::kInFpuRegister:
315     case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
316       if (kind == kReferenceVReg) {
317         return false;
318       }
319       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
320       return GetRegisterIfAccessible(reg, kind, val);
321     }
322     case DexRegisterLocation::Kind::kConstant: {
323       uint32_t result = dex_register_map[vreg].GetConstant();
324       if (kind == kReferenceVReg && result != 0) {
325         return false;
326       }
327       *val = result;
328       return true;
329     }
330     case DexRegisterLocation::Kind::kNone:
331       return false;
332     default:
333       LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
334       UNREACHABLE();
335   }
336 }
337 
338 bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location,
339                                             VRegKind kind,
340                                             uint32_t* val) const {
341   switch (location.GetKind()) {
342     case DexRegisterLocation::Kind::kInvalid:
343       break;
344     case DexRegisterLocation::Kind::kInStack: {
345       const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
346       *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
347       return true;
348     }
349     case DexRegisterLocation::Kind::kInRegister:
350     case DexRegisterLocation::Kind::kInRegisterHigh:
351     case DexRegisterLocation::Kind::kInFpuRegister:
352     case DexRegisterLocation::Kind::kInFpuRegisterHigh:
353       return GetRegisterIfAccessible(location.GetMachineRegister(), kind, val);
354     case DexRegisterLocation::Kind::kConstant:
355       *val = location.GetConstant();
356       return true;
357     case DexRegisterLocation::Kind::kNone:
358       return false;
359   }
360   LOG(FATAL) << "Unexpected location kind " << location.GetKind();
361   UNREACHABLE();
362 }
363 
364 bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
365   const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
366 
367   if (kRuntimeISA == InstructionSet::kX86 && is_float) {
368     // X86 float registers are 64-bit and each XMM register is provided as two separate
369     // 32-bit registers by the context.
370     reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
371   }
372 
373   if (!IsAccessibleRegister(reg, is_float)) {
374     return false;
375   }
376   uintptr_t ptr_val = GetRegister(reg, is_float);
377   const bool target64 = Is64BitInstructionSet(kRuntimeISA);
378   if (target64) {
379     const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
380     const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
381     int64_t value_long = static_cast<int64_t>(ptr_val);
382     if (wide_lo) {
383       ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
384     } else if (wide_hi) {
385       ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
386     }
387   }
388   *val = ptr_val;
389   return true;
390 }
391 
392 bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
393                                                       VRegKind kind_lo,
394                                                       VRegKind kind_hi,
395                                                       uint64_t* val) const {
396   uint32_t low_32bits;
397   uint32_t high_32bits;
398   bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
399   success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
400   if (success) {
401     *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
402   }
403   return success;
404 }
405 
406 bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
407                                VRegKind kind_hi, uint64_t* val) const {
408   if (kind_lo == kLongLoVReg) {
409     DCHECK_EQ(kind_hi, kLongHiVReg);
410   } else if (kind_lo == kDoubleLoVReg) {
411     DCHECK_EQ(kind_hi, kDoubleHiVReg);
412   } else {
413     LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
414     UNREACHABLE();
415   }
416   // Check if there is a value set by the debugger.
417   if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
418     return true;
419   }
420   if (cur_quick_frame_ == nullptr) {
421     DCHECK(cur_shadow_frame_ != nullptr);
422     *val = cur_shadow_frame_->GetVRegLong(vreg);
423     return true;
424   }
425   if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
426     uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
427     uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
428     *val = (val_hi << 32) + val_lo;
429     return true;
430   }
431 
432   DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
433   DCHECK(m == GetMethod());
434   DCHECK(cur_oat_quick_method_header_->IsOptimized());
435   return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
436 }
437 
438 bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
439                                                 VRegKind kind_lo, VRegKind kind_hi,
440                                                 uint64_t* val) const {
441   uint32_t low_32bits;
442   uint32_t high_32bits;
443   bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
444   success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
445   if (success) {
446     *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
447   }
448   return success;
449 }
450 
451 bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
452                                                VRegKind kind_lo, uint64_t* val) const {
453   const bool is_float = (kind_lo == kDoubleLoVReg);
454   if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
455     return false;
456   }
457   uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
458   uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
459   bool target64 = Is64BitInstructionSet(kRuntimeISA);
460   if (target64) {
461     int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
462     int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
463     ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
464     ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
465   }
466   *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
467   return true;
468 }
469 
470 ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
471   CodeItemDataAccessor accessor(m->DexInstructionData());
472   if (!accessor.HasCodeItem()) {
473     return nullptr;
474   }
475   ShadowFrame* shadow_frame = GetCurrentShadowFrame();
476   if (shadow_frame == nullptr) {
477     // This is a compiled frame: we must prepare and update a shadow frame that will
478     // be executed by the interpreter after deoptimization of the stack.
479     const size_t frame_id = GetFrameId();
480     const uint16_t num_regs = accessor.RegistersSize();
481     shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
482     CHECK(shadow_frame != nullptr);
483     // Remember the vreg(s) have been set for debugging and must not be overwritten by the
484     // original value during deoptimization of the stack.
485     thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
486     if (wide) {
487       thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
488     }
489   }
490   return shadow_frame;
491 }
492 
493 bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
494   DCHECK(kind == kIntVReg || kind == kFloatVReg);
495   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
496   if (shadow_frame == nullptr) {
497     return false;
498   }
499   shadow_frame->SetVReg(vreg, new_value);
500   return true;
501 }
502 
503 bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
504   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
505   if (shadow_frame == nullptr) {
506     return false;
507   }
508   shadow_frame->SetVRegReference(vreg, new_value);
509   return true;
510 }
511 
512 bool StackVisitor::SetVRegPair(ArtMethod* m,
513                                uint16_t vreg,
514                                uint64_t new_value,
515                                VRegKind kind_lo,
516                                VRegKind kind_hi) {
517   if (kind_lo == kLongLoVReg) {
518     DCHECK_EQ(kind_hi, kLongHiVReg);
519   } else if (kind_lo == kDoubleLoVReg) {
520     DCHECK_EQ(kind_hi, kDoubleHiVReg);
521   } else {
522     LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
523     UNREACHABLE();
524   }
525   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
526   if (shadow_frame == nullptr) {
527     return false;
528   }
529   shadow_frame->SetVRegLong(vreg, new_value);
530   return true;
531 }
532 
533 bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
534   DCHECK(context_ != nullptr);
535   return context_->IsAccessibleGPR(reg);
536 }
537 
538 uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
539   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
540   DCHECK(context_ != nullptr);
541   return context_->GetGPRAddress(reg);
542 }
543 
544 uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
545   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
546   DCHECK(context_ != nullptr);
547   return context_->GetGPR(reg);
548 }
549 
550 bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
551   DCHECK(context_ != nullptr);
552   return context_->IsAccessibleFPR(reg);
553 }
554 
555 uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
556   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
557   DCHECK(context_ != nullptr);
558   return context_->GetFPR(reg);
559 }
560 
561 uintptr_t StackVisitor::GetReturnPcAddr() const {
562   uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
563   DCHECK_NE(sp, 0u);
564   return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
565 }
566 
567 uintptr_t StackVisitor::GetReturnPc() const {
568   return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
569 }
570 
571 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
572   *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
573 }
574 
575 size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
576   struct NumFramesVisitor : public StackVisitor {
577     NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
578         : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
579 
580     bool VisitFrame() override {
581       frames++;
582       return true;
583     }
584 
585     size_t frames;
586   };
587   NumFramesVisitor visitor(thread, walk_kind);
588   visitor.WalkStack(true);
589   return visitor.frames;
590 }
591 
592 bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
593   struct HasMoreFramesVisitor : public StackVisitor {
594     HasMoreFramesVisitor(Thread* thread,
595                          StackWalkKind walk_kind,
596                          size_t num_frames,
597                          size_t frame_height)
598         : StackVisitor(thread, nullptr, walk_kind, num_frames),
599           frame_height_(frame_height),
600           found_frame_(false),
601           has_more_frames_(false),
602           next_method_(nullptr),
603           next_dex_pc_(0) {
604     }
605 
606     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
607       if (found_frame_) {
608         ArtMethod* method = GetMethod();
609         if (method != nullptr && !method->IsRuntimeMethod()) {
610           has_more_frames_ = true;
611           next_method_ = method;
612           next_dex_pc_ = GetDexPc();
613           return false;  // End stack walk once next method is found.
614         }
615       } else if (GetFrameHeight() == frame_height_) {
616         found_frame_ = true;
617       }
618       return true;
619     }
620 
621     size_t frame_height_;
622     bool found_frame_;
623     bool has_more_frames_;
624     ArtMethod* next_method_;
625     uint32_t next_dex_pc_;
626   };
627   HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
628   visitor.WalkStack(true);
629   *next_method = visitor.next_method_;
630   *next_dex_pc = visitor.next_dex_pc_;
631   return visitor.has_more_frames_;
632 }
633 
634 void StackVisitor::DescribeStack(Thread* thread) {
635   struct DescribeStackVisitor : public StackVisitor {
636     explicit DescribeStackVisitor(Thread* thread_in)
637         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
638 
639     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
640       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
641       return true;
642     }
643   };
644   DescribeStackVisitor visitor(thread);
645   visitor.WalkStack(true);
646 }
647 
648 std::string StackVisitor::DescribeLocation() const {
649   std::string result("Visiting method '");
650   ArtMethod* m = GetMethod();
651   if (m == nullptr) {
652     return "upcall";
653   }
654   result += m->PrettyMethod();
655   result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
656   if (!IsShadowFrame()) {
657     result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
658   }
659   return result;
660 }
661 
662 void StackVisitor::SetMethod(ArtMethod* method) {
663   DCHECK(GetMethod() != nullptr);
664   if (cur_shadow_frame_ != nullptr) {
665     cur_shadow_frame_->SetMethod(method);
666   } else {
667     DCHECK(cur_quick_frame_ != nullptr);
668     CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
669                                << GetMethod()->PrettyMethod() << " is inlined into "
670                                << GetOuterMethod()->PrettyMethod();
671     *cur_quick_frame_ = method;
672   }
673 }
674 
675 static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
676     REQUIRES_SHARED(Locks::mutator_lock_) {
677   if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
678     return;
679   }
680 
681   if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
682     return;
683   }
684 
685   Runtime* runtime = Runtime::Current();
686   if (runtime->UseJitCompilation() &&
687       runtime->GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(pc))) {
688     return;
689   }
690 
691   const void* code = method->GetEntryPointFromQuickCompiledCode();
692   if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
693     return;
694   }
695 
696   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
697   if (class_linker->IsQuickToInterpreterBridge(code) ||
698       class_linker->IsQuickResolutionStub(code)) {
699     return;
700   }
701 
702   if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
703     return;
704   }
705 
706   uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->GetCodeSize();
707   uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
708   CHECK(code_start <= pc && pc <= (code_start + code_size))
709       << method->PrettyMethod()
710       << " pc=" << std::hex << pc
711       << " code_start=" << code_start
712       << " code_size=" << code_size;
713 }
714 
715 void StackVisitor::SanityCheckFrame() const {
716   if (kIsDebugBuild) {
717     ArtMethod* method = GetMethod();
718     ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
719     // Runtime methods have null declaring class.
720     if (!method->IsRuntimeMethod()) {
721       CHECK(declaring_class != nullptr);
722       CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
723           << declaring_class;
724     } else {
725       CHECK(declaring_class == nullptr);
726     }
727     Runtime* const runtime = Runtime::Current();
728     LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
729     if (!linear_alloc->Contains(method)) {
730       // Check class linker linear allocs.
731       // We get the canonical method as copied methods may have their declaring
732       // class from another class loader.
733       const PointerSize ptrSize = runtime->GetClassLinker()->GetImagePointerSize();
734       ArtMethod* canonical = method->GetCanonicalMethod(ptrSize);
735       ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
736       LinearAlloc* const class_linear_alloc = (klass != nullptr)
737           ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
738           : linear_alloc;
739       if (!class_linear_alloc->Contains(canonical)) {
740         // Check image space.
741         bool in_image = false;
742         for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
743           if (space->IsImageSpace()) {
744             auto* image_space = space->AsImageSpace();
745             const auto& header = image_space->GetImageHeader();
746             const ImageSection& methods = header.GetMethodsSection();
747             const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
748             const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
749             if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
750               in_image = true;
751               break;
752             }
753           }
754         }
755         CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
756       }
757     }
758     if (cur_quick_frame_ != nullptr) {
759       AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
760       // Frame sanity.
761       size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
762       CHECK_NE(frame_size, 0u);
763       // For compiled code, we could try to have a rough guess at an upper size we expect
764       // to see for a frame:
765       // 256 registers
766       // 2 words HandleScope overhead
767       // 3+3 register spills
768       // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
769       const size_t kMaxExpectedFrameSize = interpreter::kMaxNterpFrame;
770       CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
771       size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
772       CHECK_LT(return_pc_offset, frame_size);
773     }
774   }
775 }
776 
777 QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
778   if (cur_oat_quick_method_header_ != nullptr) {
779     if (cur_oat_quick_method_header_->IsOptimized()) {
780       return cur_oat_quick_method_header_->GetFrameInfo();
781     } else {
782       DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
783       return NterpFrameInfo(cur_quick_frame_);
784     }
785   }
786 
787   ArtMethod* method = GetMethod();
788   Runtime* runtime = Runtime::Current();
789 
790   if (method->IsAbstract()) {
791     return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
792   }
793 
794   // This goes before IsProxyMethod since runtime methods have a null declaring class.
795   if (method->IsRuntimeMethod()) {
796     return runtime->GetRuntimeMethodFrameInfo(method);
797   }
798 
799   if (method->IsProxyMethod()) {
800     // There is only one direct method of a proxy class: the constructor. A direct method is
801     // cloned from the original java.lang.reflect.Proxy and is executed as a usual quick-
802     // compiled method without any stubs. Therefore the method must have an OatQuickMethodHeader.
803     DCHECK(!method->IsDirect() && !method->IsConstructor())
804         << "Constructors of proxy classes must have a OatQuickMethodHeader";
805     return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
806   }
807 
808   // The only remaining cases are for native methods that either
809   //   - use the Generic JNI stub, called either directly or through some
810   //     (resolution, instrumentation) trampoline; or
811   //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
812   DCHECK(method->IsNative());
813   if (kIsDebugBuild && !method->IsCriticalNative()) {
814     ClassLinker* class_linker = runtime->GetClassLinker();
815     const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
816                                                                              kRuntimePointerSize);
817     CHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
818           // The current entrypoint (after filtering out trampolines) may have changed
819           // from GenericJNI to JIT-compiled stub since we have entered this frame.
820           (runtime->GetJit() != nullptr &&
821            runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
822   }
823   // Generic JNI frame is just like the SaveRefsAndArgs frame.
824   // Note that HandleScope, if any, is below the frame.
825   return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
826 }
827 
828 template <StackVisitor::CountTransitions kCount>
829 void StackVisitor::WalkStack(bool include_transitions) {
830   if (check_suspended_) {
831     DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
832   }
833   CHECK_EQ(cur_depth_, 0U);
834   size_t inlined_frames_count = 0;
835 
836   for (const ManagedStack* current_fragment = thread_->GetManagedStack();
837        current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
838     cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
839     cur_quick_frame_ = current_fragment->GetTopQuickFrame();
840     cur_quick_frame_pc_ = 0;
841     DCHECK(cur_oat_quick_method_header_ == nullptr);
842     if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
843       // Can't be both a shadow and a quick fragment.
844       DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
845       ArtMethod* method = *cur_quick_frame_;
846       DCHECK(method != nullptr);
847       bool header_retrieved = false;
848       if (method->IsNative()) {
849         // We do not have a PC for the first frame, so we cannot simply use
850         // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
851         // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
852         // changed since the frame was entered. The top quick frame tag indicates
853         // GenericJNI here, otherwise it's either an AOT-compiled or a JIT-compiled JNI stub.
854         if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
855           // The generic JNI does not have any method header.
856           cur_oat_quick_method_header_ = nullptr;
857         } else {
858           const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
859           CHECK(existing_entry_point != nullptr);
860           Runtime* runtime = Runtime::Current();
861           ClassLinker* class_linker = runtime->GetClassLinker();
862           // Check whether we can quickly get the header from the current entrypoint.
863           if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
864               !class_linker->IsQuickResolutionStub(existing_entry_point) &&
865               existing_entry_point != GetQuickInstrumentationEntryPoint()) {
866             cur_oat_quick_method_header_ =
867                 OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
868           } else {
869             const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
870             if (code != nullptr) {
871               cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
872             } else {
873               // This must be a JITted JNI stub frame.
874               CHECK(runtime->GetJit() != nullptr);
875               code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
876               CHECK(code != nullptr) << method->PrettyMethod();
877               cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
878             }
879           }
880         }
881         header_retrieved = true;
882       }
883       while (method != nullptr) {
884         if (!header_retrieved) {
885           cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
886         }
887         header_retrieved = false;  // Force header retrieval in next iteration.
888         SanityCheckFrame();
889 
890         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
891             && (cur_oat_quick_method_header_ != nullptr)
892             && cur_oat_quick_method_header_->IsOptimized()
893             && !method->IsNative()  // JNI methods cannot have any inlined frames.
894             && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
895           DCHECK_NE(cur_quick_frame_pc_, 0u);
896           CodeInfo* code_info = GetCurrentInlineInfo();
897           StackMap* stack_map = GetCurrentStackMap();
898           if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
899             DCHECK_EQ(current_inline_frames_.size(), 0u);
900             for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
901                  !current_inline_frames_.empty();
902                  current_inline_frames_.pop_back()) {
903               bool should_continue = VisitFrame();
904               if (UNLIKELY(!should_continue)) {
905                 return;
906               }
907               cur_depth_++;
908               inlined_frames_count++;
909             }
910           }
911         }
912 
913         bool should_continue = VisitFrame();
914         if (UNLIKELY(!should_continue)) {
915           return;
916         }
917 
918         QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
919         if (context_ != nullptr) {
920           context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
921         }
922         // Compute PC for next stack frame from return PC.
923         size_t frame_size = frame_info.FrameSizeInBytes();
924         uintptr_t return_pc_addr = GetReturnPcAddr();
925         uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
926 
927         if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
928           // While profiling, the return pc is restored from the side stack, except when walking
929           // the stack for an exception where the side stack will be unwound in VisitFrame.
930           const std::map<uintptr_t, instrumentation::InstrumentationStackFrame>&
931               instrumentation_stack = *thread_->GetInstrumentationStack();
932           auto it = instrumentation_stack.find(return_pc_addr);
933           CHECK(it != instrumentation_stack.end());
934           const instrumentation::InstrumentationStackFrame& instrumentation_frame = it->second;
935           if (GetMethod() ==
936               Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
937             // Skip runtime save all callee frames which are used to deliver exceptions.
938           } else if (instrumentation_frame.interpreter_entry_) {
939             ArtMethod* callee =
940                 Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
941             CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
942                                           << " Found: " << ArtMethod::PrettyMethod(GetMethod());
943           } else if (!instrumentation_frame.method_->IsRuntimeMethod()) {
944             // Trampolines get replaced with their actual method in the stack,
945             // so don't do the check below for runtime methods.
946             // Instrumentation generally doesn't distinguish between a method's obsolete and
947             // non-obsolete version.
948             CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
949                      GetMethod()->GetNonObsoleteMethod())
950                 << "Expected: "
951                 << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
952                 << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
953           }
954           return_pc = instrumentation_frame.return_pc_;
955         }
956 
957         cur_quick_frame_pc_ = return_pc;
958         uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
959         cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
960 
961         if (kDebugStackWalk) {
962           LOG(INFO) << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
963               << std::boolalpha
964               << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
965                                    cur_oat_quick_method_header_->IsOptimized())
966               << " native=" << method->IsNative()
967               << std::noboolalpha
968               << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
969               << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
970               << " next=" << *cur_quick_frame_;
971         }
972 
973         if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
974           cur_depth_++;
975         }
976         method = *cur_quick_frame_;
977       }
978       // We reached a transition frame; it doesn't have a method header.
979       cur_oat_quick_method_header_ = nullptr;
980     } else if (cur_shadow_frame_ != nullptr) {
981       do {
982         SanityCheckFrame();
983         bool should_continue = VisitFrame();
984         if (UNLIKELY(!should_continue)) {
985           return;
986         }
987         cur_depth_++;
988         cur_shadow_frame_ = cur_shadow_frame_->GetLink();
989       } while (cur_shadow_frame_ != nullptr);
990     }
991     if (include_transitions) {
992       bool should_continue = VisitFrame();
993       if (!should_continue) {
994         return;
995       }
996     }
997     if (kCount == CountTransitions::kYes) {
998       cur_depth_++;
999     }
1000   }
1001   if (num_frames_ != 0) {
1002     CHECK_EQ(cur_depth_, num_frames_);
1003   }
1004 }
1005 
1006 template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
1007 template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);
1008 
1009 }  // namespace art
1010