1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "stack.h"
18 #include <limits>
19 
20 #include "android-base/stringprintf.h"
21 
22 #include "arch/context.h"
23 #include "art_method-inl.h"
24 #include "base/callee_save_type.h"
25 #include "base/hex_dump.h"
26 #include "base/pointer_size.h"
27 #include "base/utils.h"
28 #include "dex/dex_file_types.h"
29 #include "entrypoints/entrypoint_utils-inl.h"
30 #include "entrypoints/quick/callee_save_frame.h"
31 #include "entrypoints/runtime_asm_entrypoints.h"
32 #include "gc/space/image_space.h"
33 #include "gc/space/space-inl.h"
34 #include "interpreter/mterp/nterp.h"
35 #include "interpreter/shadow_frame-inl.h"
36 #include "jit/jit.h"
37 #include "jit/jit_code_cache.h"
38 #include "linear_alloc.h"
39 #include "managed_stack.h"
40 #include "mirror/class-inl.h"
41 #include "mirror/object-inl.h"
42 #include "mirror/object_array-inl.h"
43 #include "nterp_helpers.h"
44 #include "oat/oat_quick_method_header.h"
45 #include "obj_ptr-inl.h"
46 #include "quick/quick_method_frame_info.h"
47 #include "runtime.h"
48 #include "thread.h"
49 #include "thread_list.h"
50 
51 namespace art HIDDEN {
52 
53 using android::base::StringPrintf;
54 
55 static constexpr bool kDebugStackWalk = false;
56 
57 StackVisitor::StackVisitor(Thread* thread,
58                            Context* context,
59                            StackWalkKind walk_kind,
60                            bool check_suspended)
61     : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
62 
63 StackVisitor::StackVisitor(Thread* thread,
64                            Context* context,
65                            StackWalkKind walk_kind,
66                            size_t num_frames,
67                            bool check_suspended)
68     : thread_(thread),
69       walk_kind_(walk_kind),
70       cur_shadow_frame_(nullptr),
71       cur_quick_frame_(nullptr),
72       cur_quick_frame_pc_(0),
73       cur_oat_quick_method_header_(nullptr),
74       num_frames_(num_frames),
75       cur_depth_(0),
76       cur_inline_info_(nullptr, CodeInfo()),
77       cur_stack_map_(0, StackMap()),
78       context_(context),
79       check_suspended_(check_suspended) {
80   if (check_suspended_) {
81     DCHECK(thread == Thread::Current() || thread->GetState() != ThreadState::kRunnable) << *thread;
82   }
83 }
84 
85 CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
86   DCHECK(!(*cur_quick_frame_)->IsNative());
87   const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
88   if (cur_inline_info_.first != header) {
89     cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
90   }
91   return &cur_inline_info_.second;
92 }
93 
94 StackMap* StackVisitor::GetCurrentStackMap() const {
95   DCHECK(!(*cur_quick_frame_)->IsNative());
96   const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
97   if (cur_stack_map_.first != cur_quick_frame_pc_) {
98     uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
99     cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
100                                     GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
101   }
102   return &cur_stack_map_.second;
103 }
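// Both getters above memoize their last result (cur_inline_info_ keyed by the current
// method header, cur_stack_map_ keyed by the current quick frame PC), so repeated
// queries while visiting the same frame do not re-decode the CodeInfo.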
104 
105 ArtMethod* StackVisitor::GetMethod() const {
106   if (cur_shadow_frame_ != nullptr) {
107     return cur_shadow_frame_->GetMethod();
108   } else if (cur_quick_frame_ != nullptr) {
109     if (IsInInlinedFrame()) {
110       CodeInfo* code_info = GetCurrentInlineInfo();
111       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
112       return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
113     } else {
114       return *cur_quick_frame_;
115     }
116   }
117   return nullptr;
118 }
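// Note: for an inlined frame there is no ArtMethod* on the stack; GetResolvedMethod()
// above reconstructs it from the inline info recorded in the outer method's CodeInfo.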
119 
120 uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
121   if (cur_shadow_frame_ != nullptr) {
122     return cur_shadow_frame_->GetDexPC();
123   } else if (cur_quick_frame_ != nullptr) {
124     if (IsInInlinedFrame()) {
125       return current_inline_frames_.back().GetDexPc();
126     } else if (cur_oat_quick_method_header_ == nullptr) {
127       return dex::kDexNoIndex;
128     } else if ((*GetCurrentQuickFrame())->IsNative()) {
129       return cur_oat_quick_method_header_->ToDexPc(
130           GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
131     } else if (cur_oat_quick_method_header_->IsOptimized()) {
132       StackMap* stack_map = GetCurrentStackMap();
133       CHECK(stack_map->IsValid()) << "StackMap not found for " << std::hex << cur_quick_frame_pc_;
134       return stack_map->GetDexPc();
135     } else {
136       DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
137       return NterpGetDexPC(cur_quick_frame_);
138     }
139   } else {
140     return 0;
141   }
142 }
143 
144 std::vector<uint32_t> StackVisitor::ComputeDexPcList(uint32_t handler_dex_pc) const {
145   std::vector<uint32_t> result;
146   if (cur_shadow_frame_ == nullptr && cur_quick_frame_ != nullptr && IsInInlinedFrame()) {
147     const BitTableRange<InlineInfo>& infos = current_inline_frames_;
148     DCHECK_NE(infos.size(), 0u);
149 
150     // Outermost dex_pc.
151     result.push_back(GetCurrentStackMap()->GetDexPc());
152 
153     // The mid dex_pcs. Note that we skip the last one since we want to replace it with
154     // `handler_dex_pc`.
155     for (size_t index = 0; index < infos.size() - 1; ++index) {
156       result.push_back(infos[index].GetDexPc());
157     }
158   }
159 
160   // The innermost dex_pc has to be the handler dex_pc. In the case of no inline frames, it will be
161   // just the one dex_pc. In the case of inlining we will be replacing the innermost InlineInfo's
162   // dex_pc with this one.
163   result.push_back(handler_dex_pc);
164   return result;
165 }
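// For example (hypothetical frames): if the current quick frame is A() with B() inlined
// into it and C() inlined into B(), and the handler lives in C at `handler_dex_pc`, the
// resulting list is { A's dex pc at the call into B (from the stack map), B's dex pc at
// the call into C (from the inline info), handler_dex_pc }.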
166 
167 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
168     REQUIRES_SHARED(Locks::mutator_lock_);
169 
170 ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
171   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
172   ArtMethod* m = GetMethod();
173   if (m->IsStatic()) {
174     return nullptr;
175   } else if (m->IsNative()) {
176     if (cur_quick_frame_ != nullptr) {
177       // The `this` reference is stored in the first out vreg in the caller's frame.
178       const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
179       auto* stack_ref = reinterpret_cast<StackReference<mirror::Object>*>(
180           reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size + sizeof(ArtMethod*));
181       return stack_ref->AsMirrorPtr();
182     } else {
183       return cur_shadow_frame_->GetVRegReference(0);
184     }
185   } else if (m->IsProxyMethod()) {
186     if (cur_quick_frame_ != nullptr) {
187       return artQuickGetProxyThisObject(cur_quick_frame_);
188     } else {
189       return cur_shadow_frame_->GetVRegReference(0);
190     }
191   } else {
192     CodeItemDataAccessor accessor(m->DexInstructionData());
193     if (!accessor.HasCodeItem()) {
194       UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
195           << ArtMethod::PrettyMethod(m);
196       return nullptr;
197     } else {
198       uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
199       uint32_t value = 0;
200       if (!GetVReg(m, reg, kReferenceVReg, &value)) {
201         return nullptr;
202       }
203       return reinterpret_cast<mirror::Object*>(value);
204     }
205   }
206 }
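// Layout note for the native case above: cur_quick_frame_ + frame_size is the caller's
// stack pointer, which holds the caller's ArtMethod*; the slot right after it is the
// caller's first out vreg, from which the `this` StackReference is read.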
207 
208 size_t StackVisitor::GetNativePcOffset() const {
209   DCHECK(!IsShadowFrame());
210   return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
211 }
212 
213 bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
214                                                   VRegKind kind,
215                                                   uint32_t* val) const {
216   size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
217   ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
218   if (shadow_frame != nullptr) {
219     bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
220     DCHECK(updated_vreg_flags != nullptr);
221     if (updated_vreg_flags[vreg]) {
222       // Value is set by the debugger.
223       if (kind == kReferenceVReg) {
224         *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
225             shadow_frame->GetVRegReference(vreg)));
226       } else {
227         *val = shadow_frame->GetVReg(vreg);
228       }
229       return true;
230     }
231   }
232   // No value is set by the debugger.
233   return false;
234 }
235 
236 bool StackVisitor::GetVReg(ArtMethod* m,
237                            uint16_t vreg,
238                            VRegKind kind,
239                            uint32_t* val,
240                            std::optional<DexRegisterLocation> location,
241                            bool need_full_register_list) const {
242   if (cur_quick_frame_ != nullptr) {
243     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
244     DCHECK(m == GetMethod());
245     // Check if there is value set by the debugger.
246     if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
247       return true;
248     }
249     bool result = false;
250     if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
251       result = true;
252       *val = (kind == kReferenceVReg)
253           ? NterpGetVRegReference(cur_quick_frame_, vreg)
254           : NterpGetVReg(cur_quick_frame_, vreg);
255     } else {
256       DCHECK(cur_oat_quick_method_header_->IsOptimized());
257       if (location.has_value() && kind != kReferenceVReg) {
258         uint32_t val2 = *val;
259         // The caller already knows the register location, so we can use the faster overload
260         // which does not decode the stack maps.
261         result = GetVRegFromOptimizedCode(location.value(), val);
262         // Compare to the slower overload.
263         DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2, need_full_register_list));
264         DCHECK_EQ(*val, val2);
265       } else {
266         result = GetVRegFromOptimizedCode(m, vreg, kind, val, need_full_register_list);
267       }
268     }
269     if (kind == kReferenceVReg) {
270       // Perform a read barrier in case we are in a different thread and GC is ongoing.
271       mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
272       uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
273       DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
274       *val = static_cast<uint32_t>(ptr_out);
275     }
276     return result;
277   } else {
278     DCHECK(cur_shadow_frame_ != nullptr);
279     if (kind == kReferenceVReg) {
280       *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
281           cur_shadow_frame_->GetVRegReference(vreg)));
282     } else {
283       *val = cur_shadow_frame_->GetVReg(vreg);
284     }
285     return true;
286   }
287 }
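// Illustrative usage (a sketch, not part of the runtime): inside a VisitFrame()
// override a caller would typically read a register like this, assuming `vreg` is
// valid for the current method:
//   ArtMethod* m = GetMethod();
//   uint32_t value = 0;
//   if (GetVReg(m, vreg, kReferenceVReg, &value)) {
//     // `value` now holds the reference bits, already adjusted by the read barrier above.
//   }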
288 
289 size_t StackVisitor::GetNumberOfRegisters(CodeInfo* code_info, int depth) const {
290   return depth == 0
291     ? code_info->GetNumberOfDexRegisters()
292     : current_inline_frames_[depth - 1].GetNumberOfDexRegisters();
293 }
294 
295 bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m,
296                                             uint16_t vreg,
297                                             VRegKind kind,
298                                             uint32_t* val,
299                                             bool need_full_register_list) const {
300   DCHECK_EQ(m, GetMethod());
301   // Can't be null or how would we compile its instructions?
302   DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
303   const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
304   CodeInfo code_info(method_header);
305 
306   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
307   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
308   DCHECK(stack_map.IsValid());
309 
310   DexRegisterMap dex_register_map = (IsInInlinedFrame() && !need_full_register_list)
311     ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
312     : code_info.GetDexRegisterMapOf(stack_map,
313                                     /* first= */ 0,
314                                     GetNumberOfRegisters(&code_info, InlineDepth()));
315 
316   if (dex_register_map.empty()) {
317     return false;
318   }
319 
320   const size_t number_of_dex_registers = dex_register_map.size();
321   DCHECK_LT(vreg, number_of_dex_registers);
322   DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
323   switch (location_kind) {
324     case DexRegisterLocation::Kind::kInStack: {
325       const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
326       BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
327       if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
328         return false;
329       }
330       const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
331       *val = *reinterpret_cast<const uint32_t*>(addr);
332       return true;
333     }
334     case DexRegisterLocation::Kind::kInRegister: {
335       uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
336       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
337       if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
338         return false;
339       }
340       return GetRegisterIfAccessible(reg, location_kind, val);
341     }
342     case DexRegisterLocation::Kind::kInRegisterHigh:
343     case DexRegisterLocation::Kind::kInFpuRegister:
344     case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
345       if (kind == kReferenceVReg) {
346         return false;
347       }
348       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
349       return GetRegisterIfAccessible(reg, location_kind, val);
350     }
351     case DexRegisterLocation::Kind::kConstant: {
352       uint32_t result = dex_register_map[vreg].GetConstant();
353       if (kind == kReferenceVReg && result != 0) {
354         return false;
355       }
356       *val = result;
357       return true;
358     }
359     case DexRegisterLocation::Kind::kNone:
360       return false;
361     default:
362       LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
363       UNREACHABLE();
364   }
365 }
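// Note: for kReferenceVReg the location must additionally be covered by the stack map's
// stack mask or register mask; a slot or register that the GC does not track at this PC
// cannot be trusted to hold a reference, so the lookup fails instead.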
366 
367 bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const {
368   switch (location.GetKind()) {
369     case DexRegisterLocation::Kind::kInvalid:
370       break;
371     case DexRegisterLocation::Kind::kInStack: {
372       const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
373       *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
374       return true;
375     }
376     case DexRegisterLocation::Kind::kInRegister:
377     case DexRegisterLocation::Kind::kInRegisterHigh:
378     case DexRegisterLocation::Kind::kInFpuRegister:
379     case DexRegisterLocation::Kind::kInFpuRegisterHigh:
380       return GetRegisterIfAccessible(location.GetMachineRegister(), location.GetKind(), val);
381     case DexRegisterLocation::Kind::kConstant:
382       *val = location.GetConstant();
383       return true;
384     case DexRegisterLocation::Kind::kNone:
385       return false;
386   }
387   LOG(FATAL) << "Unexpected location kind " << location.GetKind();
388   UNREACHABLE();
389 }
390 
391 bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
392                                            DexRegisterLocation::Kind location_kind,
393                                            uint32_t* val) const {
394   const bool is_float = (location_kind == DexRegisterLocation::Kind::kInFpuRegister) ||
395                         (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
396 
397   if (kRuntimeISA == InstructionSet::kX86 && is_float) {
398     // X86 float registers are 64-bit and each XMM register is provided as two separate
399     // 32-bit registers by the context.
400     reg = (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh)
401         ? (2 * reg + 1)
402         : (2 * reg);
403   }
404 
405   if (!IsAccessibleRegister(reg, is_float)) {
406     return false;
407   }
408   uintptr_t ptr_val = GetRegister(reg, is_float);
409   const bool target64 = Is64BitInstructionSet(kRuntimeISA);
410   if (target64) {
411     const bool is_high = (location_kind == DexRegisterLocation::Kind::kInRegisterHigh) ||
412                          (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
413     int64_t value_long = static_cast<int64_t>(ptr_val);
414     ptr_val = static_cast<uintptr_t>(is_high ? High32Bits(value_long) : Low32Bits(value_long));
415   }
416   *val = ptr_val;
417   return true;
418 }
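// Example (illustrative): on x86, kInFpuRegisterHigh with reg == 2 reads context FPR 5
// (2 * 2 + 1), while kInFpuRegister reads FPR 4. On a 64-bit ISA, the *High kinds select
// the upper 32 bits of the 64-bit register value and the other kinds the lower 32 bits.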
419 
420 bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
421                                                       VRegKind kind_lo,
422                                                       VRegKind kind_hi,
423                                                       uint64_t* val) const {
424   uint32_t low_32bits;
425   uint32_t high_32bits;
426   bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
427   success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
428   if (success) {
429     *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
430   }
431   return success;
432 }
433 
434 bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
435                                VRegKind kind_hi, uint64_t* val) const {
436   if (kind_lo == kLongLoVReg) {
437     DCHECK_EQ(kind_hi, kLongHiVReg);
438   } else if (kind_lo == kDoubleLoVReg) {
439     DCHECK_EQ(kind_hi, kDoubleHiVReg);
440   } else {
441     LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
442     UNREACHABLE();
443   }
444   // Check if there is value set by the debugger.
445   if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
446     return true;
447   }
448   if (cur_quick_frame_ == nullptr) {
449     DCHECK(cur_shadow_frame_ != nullptr);
450     *val = cur_shadow_frame_->GetVRegLong(vreg);
451     return true;
452   }
453   if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
454     uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
455     uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
456     *val = (val_hi << 32) + val_lo;
457     return true;
458   }
459 
460   DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
461   DCHECK(m == GetMethod());
462   DCHECK(cur_oat_quick_method_header_->IsOptimized());
463   return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
464 }
465 
466 bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
467                                                 VRegKind kind_lo, VRegKind kind_hi,
468                                                 uint64_t* val) const {
469   uint32_t low_32bits;
470   uint32_t high_32bits;
471   bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
472   success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
473   if (success) {
474     *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
475   }
476   return success;
477 }
478 
479 ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
480   CodeItemDataAccessor accessor(m->DexInstructionData());
481   if (!accessor.HasCodeItem()) {
482     return nullptr;
483   }
484   ShadowFrame* shadow_frame = GetCurrentShadowFrame();
485   if (shadow_frame == nullptr) {
486     // This is a compiled frame: we must prepare and update a shadow frame that will
487     // be executed by the interpreter after deoptimization of the stack.
488     const size_t frame_id = GetFrameId();
489     const uint16_t num_regs = accessor.RegistersSize();
490     shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
491     CHECK(shadow_frame != nullptr);
492     // Remember that the vreg(s) have been set for debugging and must not be overwritten by
493     // the original value(s) during deoptimization of the stack.
494     thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
495     if (wide) {
496       thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
497     }
498   }
499   return shadow_frame;
500 }
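// The debugger shadow frame prepared above is not executed right away: it is picked up
// when the stack is deoptimized, and the updated-vreg flags keep the values written via
// SetVReg*() from being overwritten by the values recovered from the compiled frame.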
501 
502 bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
503   DCHECK(kind == kIntVReg || kind == kFloatVReg);
504   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
505   if (shadow_frame == nullptr) {
506     return false;
507   }
508   shadow_frame->SetVReg(vreg, new_value);
509   return true;
510 }
511 
512 bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
513   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
514   if (shadow_frame == nullptr) {
515     return false;
516   }
517   shadow_frame->SetVRegReference(vreg, new_value);
518   return true;
519 }
520 
521 bool StackVisitor::SetVRegPair(ArtMethod* m,
522                                uint16_t vreg,
523                                uint64_t new_value,
524                                VRegKind kind_lo,
525                                VRegKind kind_hi) {
526   if (kind_lo == kLongLoVReg) {
527     DCHECK_EQ(kind_hi, kLongHiVReg);
528   } else if (kind_lo == kDoubleLoVReg) {
529     DCHECK_EQ(kind_hi, kDoubleHiVReg);
530   } else {
531     LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
532     UNREACHABLE();
533   }
534   ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
535   if (shadow_frame == nullptr) {
536     return false;
537   }
538   shadow_frame->SetVRegLong(vreg, new_value);
539   return true;
540 }
541 
542 bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
543   DCHECK(context_ != nullptr);
544   return context_->IsAccessibleGPR(reg);
545 }
546 
547 uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
548   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
549   DCHECK(context_ != nullptr);
550   return context_->GetGPRAddress(reg);
551 }
552 
553 uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
554   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
555   DCHECK(context_ != nullptr);
556   return context_->GetGPR(reg);
557 }
558 
559 bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
560   DCHECK(context_ != nullptr);
561   return context_->IsAccessibleFPR(reg);
562 }
563 
564 uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
565   DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
566   DCHECK(context_ != nullptr);
567   return context_->GetFPR(reg);
568 }
569 
570 uintptr_t StackVisitor::GetReturnPcAddr() const {
571   uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
572   DCHECK_NE(sp, 0u);
573   return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
574 }
575 
576 uintptr_t StackVisitor::GetReturnPc() const {
577   return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
578 }
579 
580 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
581   *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
582 }
583 
584 size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
585   struct NumFramesVisitor : public StackVisitor {
586     NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
587         : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
588 
589     bool VisitFrame() override {
590       frames++;
591       return true;
592     }
593 
594     size_t frames;
595   };
596   NumFramesVisitor visitor(thread, walk_kind);
597   visitor.WalkStack(true);
598   return visitor.frames;
599 }
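// Illustrative usage (a sketch mirroring NumFramesVisitor above, not part of the
// runtime): callers typically subclass StackVisitor and drive the walk themselves:
//   struct PrintMethodsVisitor : public StackVisitor {
//     explicit PrintMethodsVisitor(Thread* t)
//         : StackVisitor(t, nullptr, StackWalkKind::kIncludeInlinedFrames) {}
//     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << ArtMethod::PrettyMethod(GetMethod());
//       return true;  // Keep walking.
//     }
//   };
//   PrintMethodsVisitor visitor(Thread::Current());
//   visitor.WalkStack(false);  // Do not visit transition frames.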
600 
601 bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
602   struct HasMoreFramesVisitor : public StackVisitor {
603     HasMoreFramesVisitor(Thread* thread,
604                          StackWalkKind walk_kind,
605                          size_t num_frames,
606                          size_t frame_height)
607         : StackVisitor(thread, nullptr, walk_kind, num_frames),
608           frame_height_(frame_height),
609           found_frame_(false),
610           has_more_frames_(false),
611           next_method_(nullptr),
612           next_dex_pc_(0) {
613     }
614 
615     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
616       if (found_frame_) {
617         ArtMethod* method = GetMethod();
618         if (method != nullptr && !method->IsRuntimeMethod()) {
619           has_more_frames_ = true;
620           next_method_ = method;
621           next_dex_pc_ = GetDexPc();
622           return false;  // End stack walk once next method is found.
623         }
624       } else if (GetFrameHeight() == frame_height_) {
625         found_frame_ = true;
626       }
627       return true;
628     }
629 
630     size_t frame_height_;
631     bool found_frame_;
632     bool has_more_frames_;
633     ArtMethod* next_method_;
634     uint32_t next_dex_pc_;
635   };
636   HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
637   visitor.WalkStack(true);
638   *next_method = visitor.next_method_;
639   *next_dex_pc = visitor.next_dex_pc_;
640   return visitor.has_more_frames_;
641 }
642 
643 void StackVisitor::DescribeStack(Thread* thread) {
644   struct DescribeStackVisitor : public StackVisitor {
645     explicit DescribeStackVisitor(Thread* thread_in)
646         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
647 
648     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
649       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
650       return true;
651     }
652   };
653   DescribeStackVisitor visitor(thread);
654   visitor.WalkStack(true);
655 }
656 
657 std::string StackVisitor::DescribeLocation() const {
658   std::string result("Visiting method '");
659   ArtMethod* m = GetMethod();
660   if (m == nullptr) {
661     return "upcall";
662   }
663   result += m->PrettyMethod();
664   result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
665   if (!IsShadowFrame()) {
666     result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
667   }
668   return result;
669 }
670 
671 void StackVisitor::SetMethod(ArtMethod* method) {
672   DCHECK(GetMethod() != nullptr);
673   if (cur_shadow_frame_ != nullptr) {
674     cur_shadow_frame_->SetMethod(method);
675   } else {
676     DCHECK(cur_quick_frame_ != nullptr);
677     CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
678                                << GetMethod()->PrettyMethod() << " is inlined into "
679                                << GetOuterMethod()->PrettyMethod();
680     *cur_quick_frame_ = method;
681   }
682 }
683 
684 void StackVisitor::ValidateFrame() const {
685   if (!kIsDebugBuild) {
686     return;
687   }
688   ArtMethod* method = GetMethod();
689   ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
690   // Runtime methods have null declaring class.
691   if (!method->IsRuntimeMethod()) {
692     CHECK(declaring_class != nullptr);
693     CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
694         << declaring_class;
695   } else {
696     CHECK(declaring_class == nullptr);
697   }
698   Runtime* const runtime = Runtime::Current();
699   LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
700   if (!linear_alloc->Contains(method)) {
701     // Check class linker linear allocs.
702     // We get the canonical method as copied methods may have been allocated
703     // by a different class loader.
704     const PointerSize ptrSize = runtime->GetClassLinker()->GetImagePointerSize();
705     ArtMethod* canonical = method->GetCanonicalMethod(ptrSize);
706     ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
707     LinearAlloc* const class_linear_alloc = (klass != nullptr)
708         ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
709         : linear_alloc;
710     if (!class_linear_alloc->Contains(canonical)) {
711       // Check image space.
712       bool in_image = false;
713       for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
714         if (space->IsImageSpace()) {
715           auto* image_space = space->AsImageSpace();
716           const auto& header = image_space->GetImageHeader();
717           const ImageSection& methods = header.GetMethodsSection();
718           const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
719           const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
720           if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
721             in_image = true;
722             break;
723           }
724         }
725       }
726       CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
727     }
728   }
729   if (cur_quick_frame_ != nullptr) {
730     // Frame consistency checks.
731     size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
732     CHECK_NE(frame_size, 0u);
733     // For compiled code, we could try to have a rough guess at an upper size we expect
734     // to see for a frame:
735     // 256 registers
736     // 2 words HandleScope overhead
737     // 3+3 register spills
738     // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
739     const size_t kMaxExpectedFrameSize = interpreter::kNterpMaxFrame;
740     CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
741     size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
742     CHECK_LT(return_pc_offset, frame_size);
743   }
744 }
745 
746 QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
747   if (cur_oat_quick_method_header_ != nullptr) {
748     if (cur_oat_quick_method_header_->IsOptimized()) {
749       return cur_oat_quick_method_header_->GetFrameInfo();
750     } else {
751       DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
752       return NterpFrameInfo(cur_quick_frame_);
753     }
754   }
755 
756   ArtMethod* method = GetMethod();
757   Runtime* runtime = Runtime::Current();
758 
759   if (method->IsAbstract()) {
760     return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
761   }
762 
763   // This goes before IsProxyMethod since runtime methods have a null declaring class.
764   if (method->IsRuntimeMethod()) {
765     return runtime->GetRuntimeMethodFrameInfo(method);
766   }
767 
768   if (method->IsProxyMethod()) {
769     // There is only one direct method of a proxy class: the constructor. A direct method is
770     // cloned from the original java.lang.reflect.Proxy and is executed as a usual quick-compiled
771     // method without any stubs. Therefore the method must have an OatQuickMethodHeader.
772     DCHECK(!method->IsDirect() && !method->IsConstructor())
773         << "Constructors of proxy classes must have a OatQuickMethodHeader";
774     return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
775   }
776 
777   // The only remaining cases are for native methods that either
778   //   - use the Generic JNI stub, called either directly or through some
779   //     (resolution, instrumentation) trampoline; or
780   //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
781   DCHECK(method->IsNative());
782   // Generic JNI frame is just like the SaveRefsAndArgs frame.
783   // Note that HandleScope, if any, is below the frame.
784   return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
785 }
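// In short: frames with a method header use the header's own frame info (optimized code)
// or the nterp frame info; abstract methods, proxy methods (other than the constructor)
// and native methods without a header all use the SaveRefsAndArgs callee-save layout,
// while runtime methods use the runtime's per-method frame info.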
786 
787 uint8_t* StackVisitor::GetShouldDeoptimizeFlagAddr() const REQUIRES_SHARED(Locks::mutator_lock_) {
788   DCHECK(GetCurrentOatQuickMethodHeader()->HasShouldDeoptimizeFlag());
789   QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
790   size_t frame_size = frame_info.FrameSizeInBytes();
791   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
792   size_t core_spill_size =
793       POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
794   size_t fpu_spill_size =
795       POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
796   size_t offset = frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
797   uint8_t* should_deoptimize_addr = sp + offset;
798   DCHECK_EQ(*should_deoptimize_addr & ~static_cast<uint8_t>(DeoptimizeFlagValue::kAll), 0);
799   return should_deoptimize_addr;
800 }
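// In other words, the flag byte sits directly below the callee-save core/FP spill area at
// the top of the frame, i.e. at SP + frame_size - core_spill_size - fpu_spill_size -
// kShouldDeoptimizeFlagSize. With hypothetical numbers (frame_size 96, core spills 24,
// no FP spills), that is SP + 72 - kShouldDeoptimizeFlagSize.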
801 
802 template <StackVisitor::CountTransitions kCount>
803 void StackVisitor::WalkStack(bool include_transitions) {
804   if (check_suspended_) {
805     DCHECK(thread_ == Thread::Current() || thread_->GetState() != ThreadState::kRunnable);
806   }
807   CHECK_EQ(cur_depth_, 0U);
808 
809   for (const ManagedStack* current_fragment = thread_->GetManagedStack();
810        current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
811     cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
812     cur_quick_frame_ = current_fragment->GetTopQuickFrame();
813     cur_quick_frame_pc_ = 0;
814     DCHECK(cur_oat_quick_method_header_ == nullptr);
815 
816     if (kDebugStackWalk) {
817       LOG(INFO) << "Tid=" << thread_->GetThreadId()
818           << ", ManagedStack fragment: " << current_fragment;
819     }
820 
821     if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
822       // Can't be both a shadow and a quick fragment.
823       DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
824       ArtMethod* method = *cur_quick_frame_;
825       DCHECK(method != nullptr);
826       bool header_retrieved = false;
827       if (method->IsNative()) {
828         // We do not have a PC for the first frame, so we cannot simply use
829         // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
830         // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
831         // changed since the frame was entered. The top quick frame tag indicates
832         // GenericJNI here, otherwise it's either an AOT-compiled or JIT-compiled JNI stub.
833         if (UNLIKELY(current_fragment->GetTopQuickFrameGenericJniTag())) {
834           // The generic JNI does not have any method header.
835           cur_oat_quick_method_header_ = nullptr;
836         } else if (UNLIKELY(current_fragment->GetTopQuickFrameJitJniTag())) {
837           // Should be JITed code.
838           Runtime* runtime = Runtime::Current();
839           const void* code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
840           CHECK(code != nullptr) << method->PrettyMethod();
841           cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
842         } else {
843           // We are sure we are not running GenericJni here, though the entry point could still be
844           // the GenericJni stub. The entry point is usually JITed or AOT code. It could also be a
845           // resolution stub if the class isn't visibly initialized yet.
846           const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
847           CHECK(existing_entry_point != nullptr);
848           Runtime* runtime = Runtime::Current();
849           ClassLinker* class_linker = runtime->GetClassLinker();
850           // Check whether we can quickly get the header from the current entrypoint.
851           if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
852               !class_linker->IsQuickResolutionStub(existing_entry_point)) {
853             cur_oat_quick_method_header_ =
854                 OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
855           } else {
856             const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
857             if (code != nullptr) {
858               cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
859             } else {
860               // For non-debuggable runtimes, the JNI stub can be JIT-compiled or AOT-compiled, and
861               // can also reuse the stub in the boot image. Since we checked for AOT code earlier,
862               // we must be running JITed code or the boot JNI stub.
863               // For debuggable runtimes, we won't get here, as AOT code is never used when
864               // debuggable, and the JIT case is handled earlier because its SP is tagged. But there
865               // is a special case where the runtime switches from non-debuggable to debuggable
866               // inside the JNI implementation and deoptimizes there, which can be treated as the
867               // non-debuggable case as well.
868               if (runtime->GetJit() != nullptr) {
869                 code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
870               }
871               if (code == nullptr) {
872                 // Check if current method uses the boot JNI stub.
873                 const void* boot_jni_stub = class_linker->FindBootJniStub(method);
874                 if (boot_jni_stub != nullptr) {
875                   code = EntryPointToCodePointer(boot_jni_stub);
876                 }
877               }
878               CHECK(code != nullptr) << method->PrettyMethod();
879               cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
880             }
881           }
882         }
883         header_retrieved = true;
884       }
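      // Only the top-most quick frame of a native method needs the special handling above;
      // for every later frame the loop below recovers the header from the callee's return PC
      // via ArtMethod::GetOatQuickMethodHeader().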
885       while (method != nullptr) {
886         if (!header_retrieved) {
887           cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
888         }
889         header_retrieved = false;  // Force header retrieval in next iteration.
890 
891         if (kDebugStackWalk) {
892           LOG(INFO) << "Early print: Tid=" << thread_->GetThreadId() << ", method: "
893               << ArtMethod::PrettyMethod(method) << "@" << method;
894         }
895         ValidateFrame();
896         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
897             && (cur_oat_quick_method_header_ != nullptr)
898             && cur_oat_quick_method_header_->IsOptimized()
899             && !method->IsNative()  // JNI methods cannot have any inlined frames.
900             && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
901           DCHECK_NE(cur_quick_frame_pc_, 0u);
902           CodeInfo* code_info = GetCurrentInlineInfo();
903           StackMap* stack_map = GetCurrentStackMap();
904           if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
905             DCHECK_EQ(current_inline_frames_.size(), 0u);
906             for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
907                  !current_inline_frames_.empty();
908                  current_inline_frames_.pop_back()) {
909               bool should_continue = VisitFrame();
910               if (UNLIKELY(!should_continue)) {
911                 return;
912               }
913               cur_depth_++;
914             }
915           }
916         }
917 
918         bool should_continue = VisitFrame();
919         if (UNLIKELY(!should_continue)) {
920           return;
921         }
922 
923         QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
924         if (context_ != nullptr) {
925           context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
926         }
927         // Compute PC for next stack frame from return PC.
928         size_t frame_size = frame_info.FrameSizeInBytes();
929         uintptr_t return_pc_addr = GetReturnPcAddr();
930 
931         cur_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(return_pc_addr);
932         uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
933         cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
934 
935         if (kDebugStackWalk) {
936           LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
937               << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
938               << std::boolalpha
939               << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
940                                    cur_oat_quick_method_header_->IsOptimized())
941               << " native=" << method->IsNative()
942               << std::noboolalpha
943               << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
944               << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
945               << " next=" << *cur_quick_frame_;
946         }
947 
948         if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
949           cur_depth_++;
950         }
951         method = *cur_quick_frame_;
952       }
953       // We reached a transition frame, it doesn't have a method header.
954       cur_oat_quick_method_header_ = nullptr;
955     } else if (cur_shadow_frame_ != nullptr) {
956       do {
957         if (kDebugStackWalk) {
958           ArtMethod* method = cur_shadow_frame_->GetMethod();
959           LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
960               << ArtMethod::PrettyMethod(method) << "@" << method
961               << ", ShadowFrame";
962         }
963         ValidateFrame();
964         bool should_continue = VisitFrame();
965         if (UNLIKELY(!should_continue)) {
966           return;
967         }
968         cur_depth_++;
969         cur_shadow_frame_ = cur_shadow_frame_->GetLink();
970       } while (cur_shadow_frame_ != nullptr);
971     }
972     if (include_transitions) {
973       bool should_continue = VisitFrame();
974       if (!should_continue) {
975         return;
976       }
977     }
978     if (kCount == CountTransitions::kYes) {
979       cur_depth_++;
980     }
981   }
982   if (num_frames_ != 0) {
983     CHECK_EQ(cur_depth_, num_frames_);
984   }
985 }
986 
987 template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
988 template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);
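// The two explicit instantiations above cover both CountTransitions modes: with kYes,
// runtime-method frames and managed-stack transitions are counted towards cur_depth_;
// with kNo they are skipped when counting.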
989 
990 }  // namespace art
991