/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"
#include <limits>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/hex_dump.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "interpreter/mterp/nterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "managed_stack.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nterp_helpers.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

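// Set to true for verbose, per-frame logging in WalkStack().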
static constexpr bool kDebugStackWalk = false;

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           bool check_suspended)
    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames,
                           bool check_suspended)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      cur_inline_info_(nullptr, CodeInfo()),
      cur_stack_map_(0, StackMap()),
      context_(context),
      check_suspended_(check_suspended) {
  if (check_suspended_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
  }
}

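// Memoized inline info decoding: the decoded CodeInfo is cached per method
// header so repeated queries on the same frame do not decode it again.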
CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_inline_info_.first != header) {
    cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
  }
  return &cur_inline_info_.second;
}

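// Memoized stack map lookup, keyed by the current quick frame PC.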
StackMap* StackVisitor::GetCurrentStackMap() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_stack_map_.first != cur_quick_frame_pc_) {
    uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
    cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
                                    GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
  }
  return &cur_stack_map_.second;
}

ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      CodeInfo* code_info = GetCurrentInlineInfo();
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      return current_inline_frames_.back().GetDexPc();
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return dex::kDexNoIndex;
    } else if ((*GetCurrentQuickFrame())->IsNative()) {
      return cur_oat_quick_method_header_->ToDexPc(
          GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
    } else if (cur_oat_quick_method_header_->IsOptimized()) {
      StackMap* stack_map = GetCurrentStackMap();
      CHECK(stack_map->IsValid()) << "StackMap not found for " << std::hex << cur_quick_frame_pc_;
      return stack_map->GetDexPc();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpGetDexPC(cur_quick_frame_);
    }
  } else {
    return 0;
  }
}

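// Returns the dex pcs of all frames at the current location, ordered outermost
// to innermost, with the innermost dex pc replaced by `handler_dex_pc`. Without
// inlined frames the result is just `handler_dex_pc`.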
std::vector<uint32_t> StackVisitor::ComputeDexPcList(uint32_t handler_dex_pc) const {
  std::vector<uint32_t> result;
  if (cur_shadow_frame_ == nullptr && cur_quick_frame_ != nullptr && IsInInlinedFrame()) {
    const BitTableRange<InlineInfo>& infos = current_inline_frames_;
    DCHECK_NE(infos.size(), 0u);

    // Outermost dex_pc.
    result.push_back(GetCurrentStackMap()->GetDexPc());

    // The mid dex_pcs. Note that we skip the last one since we want to replace it with
    // `handler_dex_pc`.
    for (size_t index = 0; index < infos.size() - 1; ++index) {
      result.push_back(infos[index].GetDexPc());
    }
  }

  // The innermost dex_pc has to be the handler dex_pc. In the case of no inline frames, it will be
  // just the one dex_pc. In the case of inlining we will be replacing the innermost InlineInfo's
  // dex_pc with this one.
  result.push_back(handler_dex_pc);
  return result;
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

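// Locates the `this` reference of the current method. For a quick native frame it
// is the first out vreg in the caller's frame, just past this frame's size and
// method pointer; for proxy methods a runtime helper reads it; otherwise it is the
// vreg holding the first input argument (registers_size - ins_size).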
ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      // The `this` reference is stored in the first out vreg in the caller's frame.
      const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      auto* stack_ref = reinterpret_cast<StackReference<mirror::Object>*>(
          reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size + sizeof(ArtMethod*));
      return stack_ref->AsMirrorPtr();
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    CodeItemDataAccessor accessor(m->DexInstructionData());
    if (!accessor.HasCodeItem()) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
          << ArtMethod::PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
      uint32_t value = 0;
      if (!GetVReg(m, reg, kReferenceVReg, &value)) {
        return nullptr;
      }
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

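// Reads a vreg of the current frame. A value installed by the debugger takes
// precedence; otherwise the value is read directly from the nterp frame or
// recovered through the optimized code's stack maps. References go through a
// read barrier since a GC may be running on another thread.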
bool StackVisitor::GetVReg(ArtMethod* m,
                           uint16_t vreg,
                           VRegKind kind,
                           uint32_t* val,
                           std::optional<DexRegisterLocation> location,
                           bool need_full_register_list) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    bool result = false;
    if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
      result = true;
      *val = (kind == kReferenceVReg)
          ? NterpGetVRegReference(cur_quick_frame_, vreg)
          : NterpGetVReg(cur_quick_frame_, vreg);
    } else {
      DCHECK(cur_oat_quick_method_header_->IsOptimized());
      if (location.has_value() && kind != kReferenceVReg) {
        uint32_t val2 = *val;
        // The caller already knows the register location, so we can use the faster overload,
        // which does not decode the stack maps.
        result = GetVRegFromOptimizedCode(location.value(), val);
        // Compare to the slower overload.
        DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2, need_full_register_list));
        DCHECK_EQ(*val, val2);
      } else {
        result = GetVRegFromOptimizedCode(m, vreg, kind, val, need_full_register_list);
      }
    }
    if (kind == kReferenceVReg) {
      // Perform a read barrier in case we are in a different thread and GC is ongoing.
      mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
      uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
      DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
      *val = static_cast<uint32_t>(ptr_out);
    }
    return result;
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

size_t StackVisitor::GetNumberOfRegisters(CodeInfo* code_info, int depth) const {
  return depth == 0
    ? code_info->GetNumberOfDexRegisters()
    : current_inline_frames_[depth - 1].GetNumberOfDexRegisters();
}

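// Slow-path vreg read for optimized code: decode the stack map at the current
// native PC and dispatch on the vreg's DexRegisterLocation, reading from the
// stack, a machine register, or an encoded constant. Returns false when the
// location is unavailable, e.g. a reference in a slot the stack mask does not
// cover.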
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m,
                                            uint16_t vreg,
                                            VRegKind kind,
                                            uint32_t* val,
                                            bool need_full_register_list) const {
  DCHECK_EQ(m, GetMethod());
  // Can't be null or how would we compile its instructions?
  DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info(method_header);

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  DCHECK(stack_map.IsValid());

  DexRegisterMap dex_register_map = (IsInInlinedFrame() && !need_full_register_list)
    ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
    : code_info.GetDexRegisterMapOf(stack_map,
                                    /* first= */ 0,
                                    GetNumberOfRegisters(&code_info, InlineDepth()));

  if (dex_register_map.empty()) {
    return false;
  }

  const size_t number_of_dex_registers = dex_register_map.size();
  DCHECK_LT(vreg, number_of_dex_registers);
  DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
      if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
        return false;
      }
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister: {
      uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
        return false;
      }
      return GetRegisterIfAccessible(reg, location_kind, val);
    }
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      if (kind == kReferenceVReg) {
        return false;
      }
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      return GetRegisterIfAccessible(reg, location_kind, val);
    }
    case DexRegisterLocation::Kind::kConstant: {
      uint32_t result = dex_register_map[vreg].GetConstant();
      if (kind == kReferenceVReg && result != 0) {
        return false;
      }
      *val = result;
      return true;
    }
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
      UNREACHABLE();
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const {
  switch (location.GetKind()) {
    case DexRegisterLocation::Kind::kInvalid:
      break;
    case DexRegisterLocation::Kind::kInStack: {
      const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
      *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh:
      return GetRegisterIfAccessible(location.GetMachineRegister(), location.GetKind(), val);
    case DexRegisterLocation::Kind::kConstant:
      *val = location.GetConstant();
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
  }
  LOG(FATAL) << "Unexpected location kind " << location.GetKind();
  UNREACHABLE();
}

bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
                                           DexRegisterLocation::Kind location_kind,
                                           uint32_t* val) const {
  const bool is_float = (location_kind == DexRegisterLocation::Kind::kInFpuRegister) ||
                        (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);

  if (kRuntimeISA == InstructionSet::kX86 && is_float) {
    // X86 float registers are 64-bit and each XMM register is provided as two separate
    // 32-bit registers by the context.
    reg = (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh)
        ? (2 * reg + 1)
        : (2 * reg);
  }

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool is_high = (location_kind == DexRegisterLocation::Kind::kInRegisterHigh) ||
                         (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    ptr_val = static_cast<uintptr_t>(is_high ? High32Bits(value_long) : Low32Bits(value_long));
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

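// Reads a wide (long/double) value as two adjacent 32-bit vregs and recombines
// them, following the same precedence as GetVReg: debugger shadow frame first,
// then shadow frame, nterp frame, or the optimized code's stack maps.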
bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ == nullptr) {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
  if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
    uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
    uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
    *val = (val_hi << 32) + val_lo;
    return true;
  }

  DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
  DCHECK(m == GetMethod());
  DCHECK(cur_oat_quick_method_header_->IsOptimized());
  return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

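// Returns the shadow frame that vreg writes should go into. For a compiled frame,
// a debugger shadow frame is created on demand and the written vreg(s) are flagged
// so that deoptimization preserves the new values rather than the original ones.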
ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    return nullptr;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = accessor.RegistersSize();
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember that the vreg(s) have been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    if (wide) {
      thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
    }
  }
  return shadow_frame;
}

bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
  DCHECK(kind == kIntVReg || kind == kFloatVReg);
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVReg(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegReference(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

uintptr_t StackVisitor::GetReturnPcAddr() const {
  uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
  DCHECK_NE(sp, 0u);
  return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
}

uintptr_t StackVisitor::GetReturnPc() const {
  return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
}

size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() override {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += m->PrettyMethod();
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

void StackVisitor::SetMethod(ArtMethod* method) {
  DCHECK(GetMethod() != nullptr);
  if (cur_shadow_frame_ != nullptr) {
    cur_shadow_frame_->SetMethod(method);
  } else {
    DCHECK(cur_quick_frame_ != nullptr);
    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
                               << GetMethod()->PrettyMethod() << " is inlined into "
                               << GetOuterMethod()->PrettyMethod();
    *cur_quick_frame_ = method;
  }
}

void StackVisitor::ValidateFrame() const {
  if (!kIsDebugBuild) {
    return;
  }
  ArtMethod* method = GetMethod();
  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
  // Runtime methods have null declaring class.
  if (!method->IsRuntimeMethod()) {
    CHECK(declaring_class != nullptr);
    CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
        << declaring_class;
  } else {
    CHECK(declaring_class == nullptr);
  }
  Runtime* const runtime = Runtime::Current();
  LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
  if (!linear_alloc->Contains(method)) {
    // Check class linker linear allocs.
    // We get the canonical method as copied methods may have been allocated
    // by a different class loader.
    const PointerSize ptrSize = runtime->GetClassLinker()->GetImagePointerSize();
    ArtMethod* canonical = method->GetCanonicalMethod(ptrSize);
    ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
    LinearAlloc* const class_linear_alloc = (klass != nullptr)
        ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
        : linear_alloc;
    if (!class_linear_alloc->Contains(canonical)) {
      // Check image space.
      bool in_image = false;
      for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
        if (space->IsImageSpace()) {
          auto* image_space = space->AsImageSpace();
          const auto& header = image_space->GetImageHeader();
          const ImageSection& methods = header.GetMethodsSection();
          const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
          const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
          if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
            in_image = true;
            break;
          }
        }
      }
      CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
    }
  }
  if (cur_quick_frame_ != nullptr) {
    // Frame consistency checks.
    size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
    CHECK_NE(frame_size, 0u);
    // For compiled code, we could try to have a rough guess at an upper size we expect
    // to see for a frame:
    // 256 registers
    // 2 words HandleScope overhead
    // 3+3 register spills
    // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
    const size_t kMaxExpectedFrameSize = interpreter::kNterpMaxFrame;
    CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
    size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
    CHECK_LT(return_pc_offset, frame_size);
  }
}

QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    if (cur_oat_quick_method_header_->IsOptimized()) {
      return cur_oat_quick_method_header_->GetFrameInfo();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpFrameInfo(cur_quick_frame_);
    }
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method in a proxy class: the constructor. A direct method is
    // cloned from the original java.lang.reflect.Proxy class and is executed as a usual
    // quick-compiled method without any stubs, so it must have an OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have a OatQuickMethodHeader";
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // The only remaining cases are for native methods that either
  //   - use the Generic JNI stub, called either directly or through some
  //     (resolution, instrumentation) trampoline; or
  //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
  DCHECK(method->IsNative());
  // Generic JNI frame is just like the SaveRefsAndArgs frame.
  // Note that HandleScope, if any, is below the frame.
  return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}

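// The ShouldDeoptimize flag lives immediately below the core and FP callee-save
// spills at the top of the frame, i.e. at
//   SP + frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize,
// as computed below.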
uint8_t* StackVisitor::GetShouldDeoptimizeFlagAddr() const REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(GetCurrentOatQuickMethodHeader()->HasShouldDeoptimizeFlag());
  QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
  size_t frame_size = frame_info.FrameSizeInBytes();
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  size_t core_spill_size =
      POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
  size_t fpu_spill_size =
      POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
  size_t offset = frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
  uint8_t* should_deoptimize_addr = sp + offset;
  DCHECK_EQ(*should_deoptimize_addr & ~static_cast<uint8_t>(DeoptimizeFlagValue::kAll), 0);
  return should_deoptimize_addr;
}

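// Walks the stack as a chain of ManagedStack fragments, most recent first. Within
// a fragment, quick frames are unwound by adding the frame size to reach the next
// ArtMethod** and reading the return PC; shadow frames are followed through their
// link pointers. When inlined frames are included, they are visited innermost
// first, before the frame they were inlined into. Transition frames between
// fragments are visited only if `include_transitions` is true.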
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
  if (check_suspended_) {
    DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  }
  CHECK_EQ(cur_depth_, 0U);

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    DCHECK(cur_oat_quick_method_header_ == nullptr);

    if (kDebugStackWalk) {
      LOG(INFO) << "Tid=" << thread_->GetThreadId()
          << ", ManagedStack fragment: " << current_fragment;
    }

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      DCHECK(method != nullptr);
      bool header_retrieved = false;
      if (method->IsNative()) {
        // We do not have a PC for the first frame, so we cannot simply use
        // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
        // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
        // changed since the frame was entered. The top quick frame tag indicates
        // GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
        if (UNLIKELY(current_fragment->GetTopQuickFrameGenericJniTag())) {
          // The generic JNI does not have any method header.
          cur_oat_quick_method_header_ = nullptr;
        } else if (UNLIKELY(current_fragment->GetTopQuickFrameJitJniTag())) {
          // Should be JITed code.
          Runtime* runtime = Runtime::Current();
          const void* code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
          CHECK(code != nullptr) << method->PrettyMethod();
          cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
        } else {
          // We are sure we are not running GenericJni here. Though the entry point could still
          // be the GenericJni stub, it is usually JITed or AOT code. It could also be a
          // resolution stub if the class isn't visibly initialized yet.
          const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
          CHECK(existing_entry_point != nullptr);
          Runtime* runtime = Runtime::Current();
          ClassLinker* class_linker = runtime->GetClassLinker();
          // Check whether we can quickly get the header from the current entrypoint.
          if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
              !class_linker->IsQuickResolutionStub(existing_entry_point)) {
            cur_oat_quick_method_header_ =
                OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
          } else {
            const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
            if (code != nullptr) {
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
            } else {
              // This must be a JITted JNI stub frame. For non-debuggable runtimes we only generate
              // JIT stubs if there are no AOT stubs for native methods. Since we checked for AOT
              // code earlier, we must be running JITed code. For debuggable runtimes we might have
              // JIT code even when AOT code is present but we tag SP in JITed JNI stubs
              // in debuggable runtimes. This case is handled earlier.
              CHECK(runtime->GetJit() != nullptr);
              code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
              CHECK(code != nullptr) << method->PrettyMethod();
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
            }
          }
        }
        header_retrieved = true;
      }
      while (method != nullptr) {
        if (!header_retrieved) {
          cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        }
        header_retrieved = false;  // Force header retrieval in next iteration.

        if (kDebugStackWalk) {
          LOG(INFO) << "Early print: Tid=" << thread_->GetThreadId() << ", method: "
              << ArtMethod::PrettyMethod(method) << "@" << method;
        }
        ValidateFrame();
        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()
            && !method->IsNative()  // JNI methods cannot have any inlined frames.
            && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
          DCHECK_NE(cur_quick_frame_pc_, 0u);
          CodeInfo* code_info = GetCurrentInlineInfo();
          StackMap* stack_map = GetCurrentStackMap();
          if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
            DCHECK_EQ(current_inline_frames_.size(), 0u);
            for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
                 !current_inline_frames_.empty();
                 current_inline_frames_.pop_back()) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        uintptr_t return_pc_addr = GetReturnPcAddr();

        cur_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
              << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
              << std::boolalpha
              << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                   cur_oat_quick_method_header_->IsOptimized())
              << " native=" << method->IsNative()
              << std::noboolalpha
              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
              << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
              << " next=" << *cur_quick_frame_;
        }

        if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
          cur_depth_++;
        }
        method = *cur_quick_frame_;
      }
      // We reached a transition frame; it doesn't have a method header.
      cur_oat_quick_method_header_ = nullptr;
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        if (kDebugStackWalk) {
          ArtMethod* method = cur_shadow_frame_->GetMethod();
          LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
              << ArtMethod::PrettyMethod(method) << "@" << method
              << ", ShadowFrame";
        }
        ValidateFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    if (kCount == CountTransitions::kYes) {
      cur_depth_++;
    }
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);

}  // namespace art