/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>

#include <ostream>
#include <string>

#include "base/macros.h"
#include "base/mutex.h"
#include "quick/quick_method_frame_info.h"

namespace art {

namespace mirror {
  class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class OatQuickMethodHeader;
class ShadowFrame;
class Thread;
union JValue;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
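
// For example, a Java long occupies a vreg pair and is read with the Lo/Hi
// kinds together via StackVisitor::GetVRegPair, declared below (an
// illustrative sketch; 'visitor', 'm' and 'vreg' are placeholders):
//
//   uint64_t raw;
//   if (visitor->GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &raw)) {
//     int64_t value = static_cast<int64_t>(raw);
//   }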

// Size in bytes of the should_deoptimize flag on stack.
// We only need 4 bytes regardless of the architecture. Frame size
// calculation will automatically align the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

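// Illustrative arithmetic (a sketch assuming 16-byte stack alignment, as on
// arm64): if the rest of the frame needs 40 bytes, adding this 4-byte flag
// gives 44, which the frame size calculation rounds up to 48.
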
/*
 * Our current stack layout.
 * The Dalvik registers come first, followed by the
 * Method*, followed by other special temporaries if any, followed by
 * regular compiler temporaries. As of now we only have the Method* as
 * a special compiler temporary.
 * A compiler temporary can be thought of as a virtual register that
 * does not exist in the dex but holds intermediate values to help
 * optimizations and code generation. A special compiler temporary is
 * one whose location in the frame is well known, while non-special ones
 * have no fixed location in the frame as long as the code generator
 * itself knows how to access them.
 *
 * TODO: Update this documentation?
 *
 *     +-------------------------------+
 *     | IN[ins-1]                     |  {Note: resides in caller's frame}
 *     |       .                       |
 *     | IN[0]                         |
 *     | caller's ArtMethod            |  ... ArtMethod*
 *     +===============================+  {Note: start of callee's frame}
 *     | core callee-save spill        |  {variable sized}
 *     +-------------------------------+
 *     | fp callee-save spill          |
 *     +-------------------------------+
 *     | filler word                   |  {For compatibility, if V[locals-1] is used as wide}
 *     +-------------------------------+
 *     | V[locals-1]                   |
 *     | V[locals-2]                   |
 *     |      .                        |
 *     |      .                        |  ... (reg == 2)
 *     | V[1]                          |  ... (reg == 1)
 *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
 *     +-------------------------------+
 *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
 *     +-------------------------------+
 *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
 *     |      .                        |
 *     |      .                        |
 *     | V[max_num_special_temps + 1]  |
 *     | V[max_num_special_temps + 0]  |
 *     +-------------------------------+
 *     | OUT[outs-1]                   |
 *     | OUT[outs-2]                   |
 *     |       .                       |
 *     | OUT[0]                        |
 *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
 *     +===============================+
 */

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}
  StackVisitor(const StackVisitor&) = default;
  StackVisitor(StackVisitor&&) = default;

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  enum class CountTransitions {
    kYes,
    kNo,
  };

  template <CountTransitions kCount = CountTransitions::kYes>
  void WalkStack(bool include_transitions = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

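  // Typical usage (an illustrative sketch, not code from this file): subclass
  // StackVisitor, override VisitFrame() to inspect each frame, then call
  // WalkStack() while holding the mutator lock. The visitor below simply
  // counts the visited frames; the class name is hypothetical.
  //
  //   class FrameCounter : public StackVisitor {
  //    public:
  //     FrameCounter(Thread* thread, Context* context)
  //         : StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {}
  //
  //     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
  //       ++count_;
  //       return true;  // Keep walking towards the oldest frame.
  //     }
  //
  //     size_t count_ = 0;
  //   };
  //
  //   FrameCounter counter(Thread::Current(), nullptr);
  //   counter.WalkStack();
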
  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
  // doesn't work with inlined methods.
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return cur_depth_;
  }

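  // Worked example of the relations above (a sketch with assumed numbers):
  // on a stack where GetNumFrames() == 5, the newest frame has cur_depth_ == 0,
  // so GetFrameHeight() == 5 - 0 - 1 == 4 and GetFrameId() == 5; the oldest
  // frame has cur_depth_ == 4, height 0, and frame ID 1.
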
  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure
  // deoptimization is triggered to make the values effective.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure
  // deoptimization is triggered to make the values effective.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      REQUIRES_SHARED(Locks::mutator_lock_);

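  // For example, a debugger-style visitor could read and then overwrite an int
  // vreg from inside its VisitFrame() override (an illustrative sketch; 'm'
  // and 'vreg' are placeholders):
  //
  //   uint32_t old_value;
  //   if (GetVReg(m, vreg, kIntVReg, &old_value)) {
  //     SetVReg(m, vreg, old_value + 1, kIntVReg);
  //   }
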
  uintptr_t* GetGPRAddress(uint32_t reg) const;

  uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInInlinedFrame() const {
    return current_inlining_depth_ != 0;
  }

  size_t GetCurrentInliningDepth() const {
    return current_inlining_depth_;
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip the ArtMethod*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }

  std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Private constructor for use when num_frames_ has already been computed.
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               size_t num_frames,
               bool check_suspended = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                          uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Inlining depth of the method we are currently at; 0 if there is no inlined frame.
  size_t current_inlining_depth_;

 protected:
  Context* const context_;
  const bool check_suspended_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_