/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>

#include <memory>
#include <string>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
#include "verify_object.h"

namespace art {

namespace mirror {
  class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class InlineInfo;
class OatQuickMethodHeader;
class ScopedObjectAccess;
class ShadowFrame;
class StackVisitor;
class Thread;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);

// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
};

// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;

// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
// thread roots).
// Note: implementation is split so that the call sites may be optimized to no-ops in case no
//       lock counting is necessary. The actual implementation is in the cc file to avoid
//       dependencies.
class LockCountData {
 public:
  // Add the given object to the list of monitors, that is, objects that have been locked. This
  // will not throw (but be skipped if there is an exception pending on entry).
  void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);

  // Try to remove the given object from the monitor list, indicating an unlock operation.
  // This will throw an IllegalMonitorStateException (clearing any already pending exception), in
  // case that there wasn't a lock recorded for the object.
  void RemoveMonitorOrThrow(Thread* self,
                            const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);

  // Check whether all acquired monitors have been released. This will potentially throw an
  // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
  // check shows that everything is OK wrt/ lock counting, false otherwise.
  bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
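
  // Illustrative pairing (a sketch, not taken from the interpreter sources): a structured-locking
  // check would use these calls roughly as follows:
  //
  //   lock_count_data.AddMonitor(self, obj);                   // on monitor-enter
  //   ...
  //   lock_count_data.RemoveMonitorOrThrow(self, obj);         // on monitor-exit
  //   ...
  //   lock_count_data.CheckAllMonitorsReleasedOrThrow(self);   // when the frame is popped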

  template <typename T, typename... Args>
  void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
    if (monitors_ != nullptr) {
      // Visitors may change the Object*. Be careful with the foreach loop.
      for (mirror::Object*& obj : *monitors_) {
        visitor(/* inout */ &obj, std::forward<Args>(args)...);
      }
    }
  }

 private:
  // Stores references to the locked-on objects. As noted, this should be visited during thread
  // marking.
  std::unique_ptr<std::vector<mirror::Object*>> monitors_;
};

// ShadowFrame has 2 possible layouts:
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
                                             ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
  }

  // Delete a ShadowFrame allocated on the heap for deoptimization.
  static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
    sf->~ShadowFrame();  // Explicitly destruct.
    uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
    delete[] memory;
  }

  // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
  // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({                              \
    size_t frame_size = ShadowFrame::ComputeSize(num_vregs);                                 \
    void* alloca_mem = alloca(frame_size);                                                   \
    ShadowFrameAllocaUniquePtr(                                                              \
        ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc),          \
                                           (alloca_mem)));                                   \
    })
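
  // Illustrative usage (hypothetical call site): the macro must be expanded in the native frame
  // that owns the alloca, e.g.
  //
  //   ShadowFrameAllocaUniquePtr shadow_frame_ptr =
  //       CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex_pc */ 0);
  //   ShadowFrame* shadow_frame = shadow_frame_ptr.get();
  //
  // The unique_ptr only runs the destructor (see ShadowFrameDeleter); the memory itself is
  // reclaimed when the caller's native frame unwinds.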

  ~ShadowFrame() {}

  // TODO(iam): Clean references array up since they're always there,
  // we don't need to do conditionals.
  bool HasReferenceArray() const {
    return true;
  }

  uint32_t NumberOfVRegs() const {
    return number_of_vregs_;
  }

  uint32_t GetDexPC() const {
    return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
  }

  int16_t GetCachedHotnessCountdown() const {
    return cached_hotness_countdown_;
  }

  void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
    cached_hotness_countdown_ = cached_hotness_countdown;
  }

  int16_t GetHotnessCountdown() const {
    return hotness_countdown_;
  }

  void SetHotnessCountdown(int16_t hotness_countdown) {
    hotness_countdown_ = hotness_countdown;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
    dex_pc_ptr_ = nullptr;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  uint32_t* GetVRegAddr(size_t i) {
    return &vregs_[i];
  }

  uint32_t* GetShadowRefAddr(size_t i) {
    DCHECK(HasReferenceArray());
    DCHECK_LT(i, NumberOfVRegs());
    return &vregs_[i + NumberOfVRegs()];
  }

  void SetCodeItem(const DexFile::CodeItem* code_item) {
    code_item_ = code_item;
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }

  // Look up the reference given its virtual register number.
  // If this returns non-null then this does not mean the vreg is currently a reference
  // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(ref);
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }
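
  // Illustrative note (no new API): wide values occupy two consecutive registers, so a long
  // written with SetVRegLong(0, v) is read back with GetVRegLong(0), while the individual 32-bit
  // halves remain reachable as GetVReg(0) / GetVReg(1) (kLongLoVReg / kLongHiVReg in VRegKind
  // terms).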

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }

  ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  LockCountData& GetLockCountData() {
    return lock_count_data_;
  }

  static size_t LockCountDataOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

  static size_t ResultRegisterOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, result_register_);
  }

  static size_t DexPCPtrOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
  }

  static size_t CodeItemOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, code_item_);
  }

  static size_t CachedHotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
  }

  static size_t HotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
  }
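
  // Illustrative example (an assumption about a typical consumer, not a contract stated here):
  // code that only holds a raw ShadowFrame* can reach individual fields via these offsets, e.g.
  //
  //   uint8_t* base = reinterpret_cast<uint8_t*>(shadow_frame);
  //   uint32_t* dex_pc_slot = reinterpret_cast<uint32_t*>(base + ShadowFrame::DexPCOffset());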

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
                                            ShadowFrame* link,
                                            ArtMethod* method,
                                            uint32_t dex_pc,
                                            void* memory) {
    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
  }

  const uint16_t* GetDexPCPtr() {
    return dex_pc_ptr_;
  }

  JValue* GetResultRegister() {
    return result_register_;
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : link_(link), method_(method), result_register_(nullptr), dex_pc_ptr_(nullptr),
        code_item_(nullptr), number_of_vregs_(num_vregs), dex_pc_(dex_pc) {
    // TODO(iam): Remove this parameter, it's an artifact of portable removal.
    DCHECK(has_reference_array);
    if (has_reference_array) {
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

  // Link to previous shadow frame or null.
  ShadowFrame* link_;
  ArtMethod* method_;
  JValue* result_register_;
  const uint16_t* dex_pc_ptr_;
  const DexFile::CodeItem* code_item_;
  LockCountData lock_count_data_;  // This may contain GC roots when lock counting is active.
  const uint32_t number_of_vregs_;
  uint32_t dex_pc_;
  int16_t cached_hotness_countdown_;
  int16_t hotness_countdown_;

  // This is a two-part array:
  //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
  //    bytes.
  //  - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
  //    ptr-sized.
  // In other words when a primitive is stored in vX, the second (reference) part of the array will
  // be null. When a reference is stored in vX, the second (reference) part of the array will be a
  // copy of vX.
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};
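
// Illustrative layout example (element sizes are build-dependent, so treat the arithmetic as a
// sketch): for a method with 4 vregs, the flexible array holds [v0..v3] followed by
// [ref0..ref3], so
//   ComputeSize(4) == sizeof(ShadowFrame) + 4 * sizeof(uint32_t) +
//                     4 * sizeof(StackReference<mirror::Object>).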

struct ShadowFrameDeleter {
  inline void operator()(ShadowFrame* frame) {
    if (frame != nullptr) {
      frame->~ShadowFrame();
    }
  }
};

class JavaFrameRootInfo : public RootInfo {
 public:
  JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
     : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
  }
  virtual void Describe(std::ostream& os) const OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  const StackVisitor* const stack_visitor_;
  const size_t vreg_;
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy this top fragment into given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link our top fragment onto the given fragment.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy this given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }

  ManagedStack* GetLink() const {
    return link_;
  }

  ArtMethod** GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(ArtMethod** top) {
    DCHECK(top_shadow_frame_ == nullptr);
    top_quick_frame_ = top;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == nullptr);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == nullptr);
    CHECK(top_shadow_frame_ != nullptr);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == nullptr);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);

  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;

 private:
  ArtMethod** top_quick_frame_;
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
};
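
// Illustrative usage sketch (hypothetical visitor, not part of this header): stack walks are
// expressed as subclasses that override VisitFrame() and then call WalkStack(), e.g.
//
//   class CountFramesVisitor : public StackVisitor {
//    public:
//     explicit CountFramesVisitor(Thread* thread)
//         : StackVisitor(thread, nullptr, StackWalkKind::kIncludeInlinedFrames), count_(0) {}
//     bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
//       ++count_;
//       return true;  // Keep walking; returning false stops the walk.
//     }
//     size_t count_;
//   };
//
//   CountFramesVisitor visitor(Thread::Current());
//   visitor.WalkStack();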

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kIncludeInlinedFramesNoResolve,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;

  void WalkStack(bool include_transitions = false)
      SHARED_REQUIRES(Locks::mutator_lock_);

  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);

  size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      SHARED_REQUIRES(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;
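
  // Illustrative example (hypothetical caller): from inside VisitFrame(), a vreg of the current
  // method can be read with an explicit kind; the return value reports whether the value was
  // recoverable, e.g.
  //
  //   uint32_t value;
  //   if (GetVReg(GetMethod(), /* vreg */ 3, kIntVReg, &value)) { /* use value */ }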

  // This is a fast-path for getting/setting values in a quick frame.
  uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
                                     const DexFile::CodeItem* code_item,
                                     uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                                     uint16_t vreg) const {
    int offset = GetVRegOffsetFromQuickCode(
        code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
    return reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Return sp-relative offset for a Dalvik virtual register, compiler
   * spill or Method* in bytes using Method*.
   * Note that (reg == -1) denotes an invalid Dalvik register. For the
   * positive values, the Dalvik registers come first, followed by the
   * Method*, followed by other special temporaries if any, followed by
   * regular compiler temporaries. As of now we only have the Method*
   * as a special compiler temporary.
   * A compiler temporary can be thought of as a virtual register that
   * does not exist in the dex but holds intermediate values to help
   * optimizations and code generation. A special compiler temporary is
   * one whose location in the frame is well known while non-special ones
   * do not have a requirement on location in the frame as long as the code
   * generator itself knows how to access them.
   *
   *     +-------------------------------+
   *     | IN[ins-1]                     |  {Note: resides in caller's frame}
   *     |  .                            |
   *     | IN[0]                         |
   *     | caller's ArtMethod            |  ... ArtMethod*
   *     +===============================+  {Note: start of callee's frame}
   *     | core callee-save spill        |  {variable sized}
   *     +-------------------------------+
   *     | fp callee-save spill          |
   *     +-------------------------------+
   *     | filler word                   |  {For compatibility, if V[locals-1] used as wide}
   *     +-------------------------------+
   *     | V[locals-1]                   |
   *     | V[locals-2]                   |
   *     |  .                            |
   *     |  .                            |  ... (reg == 2)
   *     | V[1]                          |  ... (reg == 1)
   *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
   *     +-------------------------------+
   *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
   *     +-------------------------------+
   *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
   *     |  .                            |
   *     |  .                            |
   *     | V[max_num_special_temps + 1]  |
   *     | V[max_num_special_temps + 0]  |
   *     +-------------------------------+
   *     | OUT[outs-1]                   |
   *     | OUT[outs-2]                   |
   *     |  .                            |
   *     | OUT[0]                        |
   *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
   *     +===============================+
   */
  static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                        uint32_t core_spills, uint32_t fp_spills,
                                        size_t frame_size, int reg, InstructionSet isa);

  static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
    // According to the stack model, the first out is above the Method reference.
    return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
  }
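
  // Worked example (illustrative, assuming a 64-bit ISA where InstructionSetPointerSize() is 8):
  // the outs sit just above the ArtMethod* slot at sp, so OUT[0] is at sp + 8 and OUT[2] is at
  // sp + 8 + 2 * sizeof(uint32_t) = sp + 16.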

  bool IsInInlinedFrame() const {
    return current_inlining_depth_ != 0;
  }

  size_t GetCurrentInliningDepth() const {
    return current_inlining_depth_;
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  bool IsCurrentFrameInInterpreter() const {
    return cur_shadow_frame_ != nullptr;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip ArtMethod*; handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }
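
  // Illustrative note: the pointer_size argument is the size of the ArtMethod* slot being
  // skipped, so a hypothetical caller targeting the host could write
  // GetCurrentHandleScope(sizeof(void*)).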

  std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  // Private constructor known in the case that num_frames_ has already been computed.
  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                          uint64_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);

  InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed, number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Current inlining depth of the method we are currently at.
  // 0 if there is no inlined frame.
  size_t current_inlining_depth_;

 protected:
  Context* const context_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_