/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>

#include <optional>
#include <string>

#include "base/locks.h"
#include "base/macros.h"
#include "deoptimization_kind.h"
#include "obj_ptr.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class OatQuickMethodHeader;
class ShadowFrame;
class Thread;
union JValue;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, VRegKind rhs);

// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

/*
 * Our current stack layout.
 * The Dalvik registers come first, followed by the
 * Method*, followed by other special temporaries if any, followed by
 * regular compiler temporaries. As of now we only have the Method*
 * as a special compiler temporary.
 * A compiler temporary can be thought of as a virtual register that
 * does not exist in the dex but holds intermediate values to help
 * optimizations and code generation. A special compiler temporary is
 * one whose location in the frame is well known, while non-special ones
 * have no requirement on their frame location as long as the code
 * generator itself knows how to access them.
 *
 * TODO: Update this documentation?
 *
 *     +-------------------------------+
 *     | IN[ins-1]                     |  {Note: resides in caller's frame}
 *     |       .                       |
 *     | IN[0]                         |
 *     | caller's ArtMethod            |  ... ArtMethod*
 *     +===============================+  {Note: start of callee's frame}
 *     | core callee-save spill        |  {variable sized}
 *     +-------------------------------+
 *     | fp callee-save spill          |
 *     +-------------------------------+
 *     | filler word                   |  {For compatibility, if V[locals-1] used as wide
 *     +-------------------------------+
 *     | V[locals-1]                   |
 *     | V[locals-2]                   |
 *     |      .                        |
 *     |      .                        |  ... (reg == 2)
 *     | V[1]                          |  ... (reg == 1)
 *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
 *     +-------------------------------+
 *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
 *     +-------------------------------+
 *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
 *     |      .                        |
 *     |      .                        |
 *     | V[max_num_special_temps + 1]  |
 *     | V[max_num_special_temps + 0]  |
 *     +-------------------------------+
 *     | OUT[outs-1]                   |
 *     | OUT[outs-2]                   |
 *     |       .                       |
 *     | OUT[0]                        |
 *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
 *     +===============================+
 */

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, DexRegisterLocation::Kind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}
  StackVisitor(const StackVisitor&) = default;
  StackVisitor(StackVisitor&&) = default;

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  enum class CountTransitions {
    kYes,
    kNo,
  };

  template <CountTransitions kCount = CountTransitions::kYes>
  void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);

  // Convenience helper function to walk the stack with a lambda as a visitor.
  template <CountTransitions kCountTransitions = CountTransitions::kYes,
            typename T>
  ALWAYS_INLINE static void WalkStack(const T& fn,
                                      Thread* thread,
                                      Context* context,
                                      StackWalkKind walk_kind,
                                      bool check_suspended = true,
                                      bool include_transitions = false)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class LambdaStackVisitor : public StackVisitor {
     public:
      LambdaStackVisitor(const T& fn,
                         Thread* thread,
                         Context* context,
                         StackWalkKind walk_kind,
                         bool check_suspended = true)
          : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}

      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
        return fn_(this);
      }

     private:
      T fn_;
    };
    LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
    visitor.template WalkStack<kCountTransitions>(include_transitions);
  }

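  // A minimal usage sketch (illustrative only, not part of this API): walking a
  // thread's stack with a lambda and logging each visited location. The enclosing
  // function and its arguments below are hypothetical.
  //
  //   void DumpLocations(Thread* self, Context* context)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     StackVisitor::WalkStack(
  //         [](StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
  //           LOG(INFO) << visitor->DescribeLocation();
  //           return true;  // Keep walking; return false to stop early.
  //         },
  //         self,
  //         context,
  //         StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  //   }
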
  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
  // doesn't work with inlined methods.
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<mirror::Object> GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m,
               uint16_t vreg,
               VRegKind kind,
               uint32_t* val,
               std::optional<DexRegisterLocation> location =
                   std::optional<DexRegisterLocation>()) const
      REQUIRES_SHARED(Locks::mutator_lock_);

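  // An illustrative sketch (hypothetical subclass code, not part of this header) of
  // reading a dex register from within a VisitFrame() override:
  //
  //   bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
  //     ArtMethod* m = GetMethod();
  //     uint32_t value = 0;
  //     if (GetVReg(m, /* vreg= */ 0, kIntVReg, &value)) {
  //       // 'value' now holds the contents of dex register v0 for this frame.
  //     }
  //     return true;
  //   }
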
  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
  uintptr_t GetReturnPcAddr() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInInlinedFrame() const {
    return !current_inline_frames_.empty();
  }

  InlineInfo GetCurrentInlinedFrame() const {
    return current_inline_frames_.back();
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetShouldDeoptimizeFlag(DeoptimizeFlagValue value) REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* should_deoptimize_addr = GetShouldDeoptimizeFlagAddr();
    *should_deoptimize_addr = *should_deoptimize_addr | static_cast<uint8_t>(value);
  }

  uint8_t GetShouldDeoptimizeFlag() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return *GetShouldDeoptimizeFlagAddr();
  }

 private:
  // Private constructor used when num_frames_ has already been computed.
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               size_t num_frames,
               bool check_suspended = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m,
                                uint16_t vreg,
                                VRegKind kind,
                                uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                          VRegKind kind_lo,
                                          VRegKind kind_hi,
                                          uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m,
                                    uint16_t vreg,
                                    VRegKind kind_lo,
                                    VRegKind kind_hi,
                                    uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  ShadowFrame* PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ValidateFrame() const REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE CodeInfo* GetCurrentInlineInfo() const;
  ALWAYS_INLINE StackMap* GetCurrentStackMap() const;

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Current inlined frames of the method we are currently at.
  // We keep popping frames from the end as we visit the frames.
  BitTableRange<InlineInfo> current_inline_frames_;

  // Cache the most recently decoded inline info data.
  // The 'current_inline_frames_' refers to this data, so we need to keep it alive anyway.
  // Marked mutable since the cache fields are updated from const getters.
  mutable std::pair<const OatQuickMethodHeader*, CodeInfo> cur_inline_info_;
  mutable std::pair<uintptr_t, StackMap> cur_stack_map_;

  uint8_t* GetShouldDeoptimizeFlagAddr() const REQUIRES_SHARED(Locks::mutator_lock_);

 protected:
  Context* const context_;
  const bool check_suspended_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_