/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <optional>
#include <stdint.h>
#include <string>

#include "base/locks.h"
#include "base/macros.h"
#include "obj_ptr.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class OatQuickMethodHeader;
class ShadowFrame;
class Thread;
union JValue;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);

// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;
/*
 * Our current stack layout.
 * The Dalvik registers come first, followed by the
 * Method*, followed by other special temporaries if any, followed by
 * regular compiler temporaries. As of now we only have the Method*
 * as a special compiler temporary.
 * A compiler temporary can be thought of as a virtual register that
 * does not exist in the dex but holds intermediate values to help
 * optimizations and code generation. A special compiler temporary is
 * one whose location in the frame is well known, while non-special ones
 * have no fixed location requirement as long as the code generator
 * itself knows how to access them.
 *
 * TODO: Update this documentation?
 *
 *     +-------------------------------+
 *     | IN[ins-1]                     |  {Note: resides in caller's frame}
 *     |       .                       |
 *     | IN[0]                         |
 *     | caller's ArtMethod            |  ... ArtMethod*
 *     +===============================+  {Note: start of callee's frame}
 *     | core callee-save spill        |  {variable sized}
 *     +-------------------------------+
 *     | fp callee-save spill          |
 *     +-------------------------------+
 *     | filler word                   |  {For compatibility, if V[locals-1] used as wide}
 *     +-------------------------------+
 *     | V[locals-1]                   |
 *     | V[locals-2]                   |
 *     |      .                        |
 *     |      .                        |  ... (reg == 2)
 *     | V[1]                          |  ... (reg == 1)
 *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
 *     +-------------------------------+
 *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
 *     +-------------------------------+
 *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
 *     |      .                        |
 *     |      .                        |
 *     | V[max_num_special_temps + 1]  |
 *     | V[max_num_special_temps + 0]  |
 *     +-------------------------------+
 *     | OUT[outs-1]                   |
 *     | OUT[outs-2]                   |
 *     |       .                       |
 *     | OUT[0]                        |
 *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
 *     +===============================+
 */

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}
  StackVisitor(const StackVisitor&) = default;
  StackVisitor(StackVisitor&&) = default;

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  enum class CountTransitions {
    kYes,
    kNo,
  };

  template <CountTransitions kCount = CountTransitions::kYes>
  void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);

  // Convenience helper function to walk the stack with a lambda as a visitor.
  template <CountTransitions kCountTransitions = CountTransitions::kYes,
            typename T>
  ALWAYS_INLINE static void WalkStack(const T& fn,
                                      Thread* thread,
                                      Context* context,
                                      StackWalkKind walk_kind,
                                      bool check_suspended = true,
                                      bool include_transitions = false)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class LambdaStackVisitor : public StackVisitor {
     public:
      LambdaStackVisitor(const T& fn,
                         Thread* thread,
                         Context* context,
                         StackWalkKind walk_kind,
                         bool check_suspended = true)
          : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}

      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
        return fn_(this);
      }

     private:
      T fn_;
    };
    LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
    visitor.template WalkStack<kCountTransitions>(include_transitions);
  }
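
  // Illustrative usage sketch (not part of this header's API): a caller holding
  // the mutator lock can walk a thread's stack with the lambda overload above.
  // The `thread` value is assumed to be supplied by the caller; a null Context
  // is shown here only for brevity.
  //
  //   StackVisitor::WalkStack(
  //       [](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
  //         LOG(INFO) << visitor->DescribeLocation();
  //         return true;  // Keep walking; return false to stop early.
  //       },
  //       thread,
  //       /* context= */ nullptr,
  //       StackVisitor::StackWalkKind::kIncludeInlinedFrames);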

  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
  // doesn't work with inlined methods.
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<mirror::Object> GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the height of the stack in managed stack frames, including transitions.
  size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return cur_depth_;
  }
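
  // Worked example (illustrative, assuming a walk over 5 total frames): the most
  // recently pushed frame is visited first with cur_depth_ == 0, so it has
  // GetFrameHeight() == 4 and GetFrameId() == 5; the oldest frame is visited
  // last with cur_depth_ == 4, giving GetFrameHeight() == 0 and GetFrameId() == 1.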
222 
223   // Get the method and dex pc immediately after the one that's currently being visited.
224   bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
225       REQUIRES_SHARED(Locks::mutator_lock_);
226 
227   bool GetVReg(ArtMethod* m,
228                uint16_t vreg,
229                VRegKind kind,
230                uint32_t* val,
231                std::optional<DexRegisterLocation> location =
232                    std::optional<DexRegisterLocation>()) const
233       REQUIRES_SHARED(Locks::mutator_lock_);
234 
235   bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
236                    uint64_t* val) const
237       REQUIRES_SHARED(Locks::mutator_lock_);
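
  // Illustrative sketch (assumed to run inside a VisitFrame() override, with the
  // vreg indices known to the caller): a 32-bit value is read with GetVReg(),
  // and a 64-bit value with GetVRegPair(), passing the low and high kinds.
  //
  //   uint32_t int_value;
  //   if (GetVReg(GetMethod(), /* vreg= */ 0, kIntVReg, &int_value)) {
  //     // int_value now holds vreg 0.
  //   }
  //   uint64_t long_value;
  //   if (GetVRegPair(GetMethod(), /* vreg= */ 2, kLongLoVReg, kLongHiVReg, &long_value)) {
  //     // long_value was assembled from vregs 2 and 3.
  //   }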

  // Values will be set in debugger shadow frames. The debugger will make sure
  // deoptimization is triggered so the new values take effect.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure
  // deoptimization is triggered so the new values take effect.
  bool SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure
  // deoptimization is triggered so the new values take effect.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      REQUIRES_SHARED(Locks::mutator_lock_);
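
  // Illustrative sketch (assumed to run in a debugger-driven VisitFrame(), with
  // the vreg index and new value supplied by the caller): the write lands in a
  // debugger shadow frame, so it only becomes visible to the method once the
  // debugger forces deoptimization of this frame.
  //
  //   if (!SetVRegPair(GetMethod(), /* vreg= */ 2, new_long_value,
  //                    kLongLoVReg, kLongHiVReg)) {
  //     // The frame could not be updated.
  //   }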

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
  uintptr_t GetReturnPcAddr() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInInlinedFrame() const {
    return !current_inline_frames_.empty();
  }

  InlineInfo GetCurrentInlinedFrame() const {
    return current_inline_frames_.back();
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip the ArtMethod*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }

  std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Private constructor used when num_frames_ has already been computed.
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               size_t num_frames,
               bool check_suspended = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                          uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(DexRegisterLocation location, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  ShadowFrame* PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE CodeInfo* GetCurrentInlineInfo() const;
  ALWAYS_INLINE StackMap* GetCurrentStackMap() const;

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Inlined frames of the method we are currently visiting.
  // We keep popping frames from the end as we visit the frames.
  BitTableRange<InlineInfo> current_inline_frames_;

  // Cache of the most recently decoded inline info data.
  // 'current_inline_frames_' refers to this data, so we need to keep it alive anyway.
  // Marked mutable since the cache fields are updated from const getters.
  mutable std::pair<const OatQuickMethodHeader*, CodeInfo> cur_inline_info_;
  mutable std::pair<uintptr_t, StackMap> cur_stack_map_;

 protected:
  Context* const context_;
  const bool check_suspended_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_