/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include "dex_file.h"
#include "instrumentation.h"
#include "base/macros.h"
#include "arch/context.h"

#include <stdint.h>
#include <string>

namespace art {

namespace mirror {
  class ArtMethod;
  class Object;
}  // namespace mirror

class Context;
class ShadowFrame;
class StackIndirectReferenceTable;
class ScopedObjectAccess;
class Thread;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
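
// For example (a usage sketch, not code from this file): a StackVisitor
// reading Dalvik register v0 of method m as an int would pass kIntVReg:
//   uint32_t value = visitor.GetVReg(m, 0, kIntVReg);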

// ShadowFrame has 3 possible layouts:
//  - portable - a unified array of VRegs and references. Precise references need GC maps.
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(mirror::Object*) * num_vregs);
  }
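
  // The size covers the fixed members plus two trailing arrays that start at
  // vregs_[0]: num_vregs uint32_t slots followed (when a reference array is
  // present) by num_vregs mirror::Object* slots. For example, on a 32-bit
  // target with a 16-byte fixed portion and num_vregs == 4 this is
  // 16 + 4*4 + 4*4 = 48 bytes (the 16-byte figure is illustrative).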

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
    return sf;
  }

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
    ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
    return sf;
  }
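
  // Usage sketch (an assumed caller pattern, not code from this file): the
  // interpreter can place the frame in memory it already owns, e.g.
  //   void* memory = alloca(ShadowFrame::ComputeSize(num_vregs));
  //   ShadowFrame* frame = ShadowFrame::Create(num_vregs, link, method, dex_pc, memory);
  // while the four-argument overload news its own storage for frames that
  // must outlive the caller, as in deoptimization.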
  ~ShadowFrame() {}

  bool HasReferenceArray() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return (number_of_vregs_ & kHasReferenceArray) != 0;
#else
    return true;
#endif
  }

  uint32_t NumberOfVRegs() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return number_of_vregs_ & ~kHasReferenceArray;
#else
    return number_of_vregs_;
#endif
  }

  void SetNumberOfVRegs(uint32_t number_of_vregs) {
#if defined(ART_USE_PORTABLE_COMPILER)
    number_of_vregs_ = number_of_vregs | (number_of_vregs_ & kHasReferenceArray);
#else
    UNUSED(number_of_vregs);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  uint32_t GetDexPC() const {
    return dex_pc_;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: the reinterpret_cast below may run afoul of strict-aliasing rules.
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int64_t*>(vreg);
  }
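
  // Note (an inference from the 64-bit cast above, not stated in this file):
  // wide values occupy two consecutive vreg slots and callers pass the low
  // slot index, so GetVRegLong(0) reads vregs_[0] and vregs_[1] as one int64_t.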

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const double*>(vreg);
  }

  mirror::Object* GetVRegReference(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    if (HasReferenceArray()) {
      return References()[i];
    } else {
      const uint32_t* vreg = &vregs_[i];
      return *reinterpret_cast<mirror::Object* const*>(vreg);
    }
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int64_t*>(vreg) = val;
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<double*>(vreg) = val;
  }

  void SetVRegReference(size_t i, mirror::Object* val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<mirror::Object**>(vreg) = val;
    if (HasReferenceArray()) {
      References()[i] = val;
    }
  }
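
  // Note that a reference is stored both in its vreg slot and, when present,
  // in the trailing reference array; a plausible reason (an inference, not
  // stated here) is that the duplicate lets the GC scan roots without
  // per-vreg type information.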

  mirror::ArtMethod* GetMethod() const {
    DCHECK_NE(method_, static_cast<void*>(NULL));
    return method_;
  }

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetMethod(mirror::ArtMethod* method) {
#if defined(ART_USE_PORTABLE_COMPILER)
    DCHECK_NE(method, static_cast<void*>(NULL));
    method_ = method;
#else
    UNUSED(method);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  bool Contains(mirror::Object** shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
    if (has_reference_array) {
#if defined(ART_USE_PORTABLE_COMPILER)
      CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
      number_of_vregs_ |= kHasReferenceArray;
#endif
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(mirror::Object*)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  mirror::Object* const* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<mirror::Object* const*>(vreg_end);
  }

  mirror::Object** References() {
    return const_cast<mirror::Object**>(const_cast<const ShadowFrame*>(this)->References());
  }

#if defined(ART_USE_PORTABLE_COMPILER)
  enum ShadowFrameFlag {
    kHasReferenceArray = 1ul << 31
  };
  // TODO: make const in the portable case.
  uint32_t number_of_vregs_;
#else
  const uint32_t number_of_vregs_;
#endif
  // Link to previous shadow frame or NULL.
  ShadowFrame* link_;
#if defined(ART_USE_PORTABLE_COMPILER)
  // TODO: make const in the portable case.
  mirror::ArtMethod* method_;
#else
  mirror::ArtMethod* const method_;
#endif
  uint32_t dex_pc_;
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : link_(NULL), top_shadow_frame_(NULL), top_quick_frame_(NULL), top_quick_frame_pc_(0) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy the current top fragment into the given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link the saved fragment below the new top.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy the given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }
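
  // Pairing sketch (assumed usage, mirroring the transition pattern described
  // above; the variable names are illustrative): code entering a different
  // frame layout saves the current top in a local fragment and restores it on
  // the way out:
  //   ManagedStack fragment;
  //   managed_stack->PushManagedStackFragment(&fragment);
  //   ... run code using the other frame layout ...
  //   managed_stack->PopManagedStackFragment(fragment);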

  ManagedStack* GetLink() const {
    return link_;
  }

  mirror::ArtMethod** GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(mirror::ArtMethod** top) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_ = top;
  }

  uintptr_t GetTopQuickFramePc() const {
    return top_quick_frame_pc_;
  }

  void SetTopQuickFramePc(uintptr_t pc) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_pc_ = pc;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  static size_t TopQuickFramePcOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_pc_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == NULL);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == NULL);
    CHECK(top_shadow_frame_ != NULL);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }
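
  // The DCHECKs above encode an invariant: a fragment tracks either shadow
  // frames or quick frames, never both at once. Assumed pairing (a sketch,
  // not code from this file):
  //   managed_stack->PushShadowFrame(frame);
  //   ... interpret the method ...
  //   ShadowFrame* same_frame = managed_stack->PopShadowFrame();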

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == NULL);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const;

  bool ShadowFramesContain(mirror::Object** shadow_frame_entry) const;

 private:
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
  mirror::ArtMethod** top_quick_frame_;
  uintptr_t top_quick_frame_pc_;
};

class StackVisitor {
 protected:
  StackVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

  void WalkStack(bool include_transitions = false)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
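
  // Minimal subclass sketch (hypothetical; the name and body are illustrative,
  // not from this file): override VisitFrame() and call WalkStack(), which
  // invokes it once per frame until it returns false.
  //   class FrameCounter : public StackVisitor {
  //    public:
  //     explicit FrameCounter(Thread* thread)
  //         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
  //         : StackVisitor(thread, NULL), count(0) {}
  //     bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  //       count++;
  //       return true;  // Keep visiting.
  //     }
  //     size_t count;
  //   };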

  mirror::ArtMethod* GetMethod() const {
    if (cur_shadow_frame_ != NULL) {
      return cur_shadow_frame_->GetMethod();
    } else if (cur_quick_frame_ != NULL) {
      return *cur_quick_frame_;
    } else {
      return NULL;
    }
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != NULL;
  }

  uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const {
    // Callee saves are held at the top of the frame.
    DCHECK(GetMethod() != NULL);
    byte* save_addr =
        reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__)
    save_addr -= kPointerSize;  // account for return address
#endif
    return reinterpret_cast<uintptr_t*>(save_addr);
  }
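
  // Worked example (illustrative numbers): with frame_size == 64 and 4-byte
  // pointers, CalleeSaveAddress(0, 64) points one pointer below the top of
  // the frame (offset 60 from cur_quick_frame_), and each successive num
  // moves one pointer further down; on x86 one extra slot is skipped for the
  // return address.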

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_);
    }
    return num_frames_;
  }

  uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t GetGPR(uint32_t reg) const;
  void SetGPR(uint32_t reg, uintptr_t value);

  uint32_t GetVReg(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
                   uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                   uint16_t vreg) const {
    int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
    return *reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const;

  void SetReturnPc(uintptr_t new_ret_pc);

  /*
   * Return sp-relative offset for a Dalvik virtual register, compiler
   * spill or Method* in bytes using Method*.
   * Note that (reg >= 0) refers to a Dalvik register, (reg == -2)
   * denotes Method* and (reg <= -3) denotes a compiler temp.
   *
   *     +------------------------+
   *     | IN[ins-1]              |  {Note: resides in caller's frame}
   *     |       .                |
   *     | IN[0]                  |
   *     | caller's Method*       |
   *     +========================+  {Note: start of callee's frame}
   *     | core callee-save spill |  {variable sized}
   *     +------------------------+
   *     | fp callee-save spill   |
   *     +------------------------+
   *     | filler word            |  {For compatibility, if V[locals-1] is used as wide}
   *     +------------------------+
   *     | V[locals-1]            |
   *     | V[locals-2]            |
   *     |      .                 |
   *     |      .                 |  ... (reg == 2)
   *     | V[1]                   |  ... (reg == 1)
   *     | V[0]                   |  ... (reg == 0) <---- "locals_start"
   *     +------------------------+
   *     | Compiler temps         |  ... (reg == -3)
   *     |                        |  ... (reg == -4)
   *     |                        |  ... (reg == -5)
   *     +------------------------+
   *     | stack alignment padding|  {0 to (kStackAlignWords-1) of padding}
   *     +------------------------+
   *     | OUT[outs-1]            |
   *     | OUT[outs-2]            |
   *     |       .                |
   *     | OUT[0]                 |
   *     | curMethod*             |  ... (reg == -2) <<== sp, 16-byte aligned
   *     +========================+
   */
  static int GetVRegOffset(const DexFile::CodeItem* code_item,
                           uint32_t core_spills, uint32_t fp_spills,
                           size_t frame_size, int reg) {
    DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
    int num_spills = __builtin_popcount(core_spills) + __builtin_popcount(fp_spills) + 1;  // Filler.
    int num_ins = code_item->ins_size_;
    int num_regs = code_item->registers_size_ - num_ins;
    int locals_start = frame_size - ((num_spills + num_regs) * sizeof(uint32_t));
    if (reg == -2) {
      return 0;  // Method*
    } else if (reg <= -3) {
      return locals_start - ((reg + 1) * sizeof(uint32_t));  // Compiler temp.
    } else if (reg < num_regs) {
      return locals_start + (reg * sizeof(uint32_t));        // Dalvik local reg.
    } else {
      return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + sizeof(uint32_t);  // Dalvik in.
    }
  }
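
  // Worked example (illustrative numbers, not from this file): with
  // frame_size == 64, two bits set in core_spills, fp_spills == 0,
  // registers_size_ == 5 and ins_size_ == 1:
  //   num_spills   = 2 + 0 + 1 = 3   (including the filler word)
  //   num_regs     = 5 - 1 = 4
  //   locals_start = 64 - (3 + 4) * 4 = 36
  // so v0..v3 live at offsets 36..48 from sp, the in register v4 at
  // 64 + 0 + 4 = 68 (in the caller's frame), and Method* at offset 0.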

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  mirror::ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  StackIndirectReferenceTable* GetCurrentSirt() const {
    mirror::ArtMethod** sp = GetCurrentQuickFrame();
    ++sp;  // Skip Method*; the SIRT comes next.
    return reinterpret_cast<StackIndirectReferenceTable*>(sp);
  }

  std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const;

  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* const thread_;
  ShadowFrame* cur_shadow_frame_;
  mirror::ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  // Lazily computed, number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;

 protected:
  Context* const context_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_