/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_
#define ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_

#include <cstdint>
#include <cstring>
#include <memory>
#include <string>

#include "base/locks.h"
#include "base/macros.h"
#include "lock_count_data.h"
#include "read_barrier.h"
#include "stack_reference.h"
#include "verify_object.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class ShadowFrame;
template<class MirrorType> class ObjPtr;
class Thread;
union JValue;

// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;

// ShadowFrame has 2 possible layouts:
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 private:
  // Used to keep track of extra state the shadow frame has.
  enum class FrameFlags : uint32_t {
    // We have been requested to notify when this frame gets popped.
    kNotifyFramePop = 1 << 0,
    // We have been asked to pop this frame off the stack as soon as possible.
    kForcePopFrame = 1 << 1,
    // We have been asked to re-execute the last instruction.
    kForceRetryInst = 1 << 2,
    // Mark that we expect the next frame to retry the last instruction (used by instrumentation
    // and debuggers to keep track of required events).
    kSkipMethodExitEvents = 1 << 3,
    // Used to suppress exception events caused by other instrumentation events.
    kSkipNextExceptionEvent = 1 << 4,
  };

 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
                                             ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
  }

  // Delete a ShadowFrame allocated on the heap for deoptimization.
  static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
    sf->~ShadowFrame();  // Explicitly destruct.
    uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
    delete[] memory;
  }
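
  // Illustrative sketch of the heap-allocation path above; `caller_frame`, `target_method`
  // and the literal arguments are hypothetical placeholders rather than values defined in
  // this file:
  //
  //   ShadowFrame* frame = ShadowFrame::CreateDeoptimizedFrame(
  //       /* num_vregs= */ 4u, caller_frame, target_method, /* dex_pc= */ 0u);
  //   // ... interpret using `frame` ...
  //   ShadowFrame::DeleteDeoptimizedFrame(frame);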

  // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
  // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({                             \
    size_t frame_size = ShadowFrame::ComputeSize(num_vregs);                                \
    void* alloca_mem = alloca(frame_size);                                                  \
    ShadowFrameAllocaUniquePtr(                                                             \
        ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc),         \
                                           (alloca_mem)));                                  \
  })

  ~ShadowFrame() {}

  uint32_t NumberOfVRegs() const {
    return number_of_vregs_;
  }

  uint32_t GetDexPC() const {
    return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - dex_instructions_;
  }

  int16_t GetCachedHotnessCountdown() const {
    return cached_hotness_countdown_;
  }

  void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
    cached_hotness_countdown_ = cached_hotness_countdown;
  }

  int16_t GetHotnessCountdown() const {
    return hotness_countdown_;
  }

  void SetHotnessCountdown(int16_t hotness_countdown) {
    hotness_countdown_ = hotness_countdown;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
    dex_pc_ptr_ = nullptr;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  // Shorts are extended to ints in VRegs. Interpreter intrinsics need them as shorts.
  int16_t GetVRegShort(size_t i) const {
    return static_cast<int16_t>(GetVReg(i));
  }

  uint32_t* GetVRegAddr(size_t i) {
    return &vregs_[i];
  }

  uint32_t* GetShadowRefAddr(size_t i) {
    DCHECK_LT(i, NumberOfVRegs());
    return &vregs_[i + NumberOfVRegs()];
  }

  const uint16_t* GetDexInstructions() const {
    return dex_instructions_;
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i + 1, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i + 1, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }
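
  // Illustrative sketch of wide-register access: a long or double occupies the vreg pair
  // (i, i + 1), which is why the wide accessors check DCHECK_LT(i + 1, NumberOfVRegs()).
  // `frame` is a hypothetical placeholder:
  //
  //   frame->SetVRegLong(0, INT64_C(0x123456789));  // writes vregs 0 and 1
  //   int64_t value = frame->GetVRegLong(0);        // reads the same pair back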

  // Look up the reference given its virtual register number.
  // On non-moving collectors, a non-null result does not guarantee that the vreg currently
  // holds a reference; if not certain, check that the raw value from GetVReg matches this.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    ref = References()[i].AsMirrorPtr();
    ReadBarrier::MaybeAssertToSpaceInvariant(ref);
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    References()[i].Clear();
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    References()[i].Clear();
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i + 1, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    References()[i].Clear();
    References()[i + 1].Clear();
  }
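
  // Illustrative sketch of the invariant the primitive setters maintain: storing a primitive
  // clears the shadow reference slot so a moving GC will not treat the raw bits as a
  // reference. `frame` and `obj` are hypothetical placeholders:
  //
  //   frame->SetVRegReference(2, obj);  // vreg 2 and its reference slot both hold obj
  //   frame->SetVReg(2, 42);            // vreg 2 holds 42; the reference slot is cleared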

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i + 1, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    References()[i].Clear();
    References()[i + 1].Clear();
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, ObjPtr<mirror::Object> val)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
    DCHECK(method != nullptr);
    DCHECK(method_ != nullptr);
    method_ = method;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    return ((&References()[0] <= shadow_frame_entry_obj) &&
            (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
  }

  LockCountData& GetLockCountData() {
    return lock_count_data_;
  }

  static constexpr size_t LockCountDataOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
  }

  static constexpr size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static constexpr size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static constexpr size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static constexpr size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static constexpr size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

  static constexpr size_t ResultRegisterOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, result_register_);
  }

  static constexpr size_t DexPCPtrOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
  }

  static constexpr size_t DexInstructionsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
  }

  static constexpr size_t CachedHotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
  }

  static constexpr size_t HotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
  }
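
  // Illustrative sketch of the stack-allocation path: CREATE_SHADOW_FRAME (defined above)
  // alloca()s space in the caller's frame and wraps the result in a
  // ShadowFrameAllocaUniquePtr so ShadowFrameDeleter runs the destructor but not delete.
  // `num_regs` and `method` are hypothetical placeholders:
  //
  //   ShadowFrameAllocaUniquePtr frame =
  //       CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0u);
  //   frame->SetVReg(0, 1);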

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
                                            ShadowFrame* link,
                                            ArtMethod* method,
                                            uint32_t dex_pc,
                                            void* memory) {
    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc);
  }

  const uint16_t* GetDexPCPtr() {
    return dex_pc_ptr_;
  }

  void SetDexPCPtr(uint16_t* dex_pc_ptr) {
    dex_pc_ptr_ = dex_pc_ptr;
  }

  JValue* GetResultRegister() {
    return result_register_;
  }

  bool NeedsNotifyPop() const {
    return GetFrameFlag(FrameFlags::kNotifyFramePop);
  }

  void SetNotifyPop(bool notify) {
    UpdateFrameFlag(notify, FrameFlags::kNotifyFramePop);
  }

  bool GetForcePopFrame() const {
    return GetFrameFlag(FrameFlags::kForcePopFrame);
  }

  void SetForcePopFrame(bool enable) {
    UpdateFrameFlag(enable, FrameFlags::kForcePopFrame);
  }

  bool GetForceRetryInstruction() const {
    return GetFrameFlag(FrameFlags::kForceRetryInst);
  }

  void SetForceRetryInstruction(bool enable) {
    UpdateFrameFlag(enable, FrameFlags::kForceRetryInst);
  }

  bool GetSkipMethodExitEvents() const {
    return GetFrameFlag(FrameFlags::kSkipMethodExitEvents);
  }

  void SetSkipMethodExitEvents(bool enable) {
    UpdateFrameFlag(enable, FrameFlags::kSkipMethodExitEvents);
  }

  bool GetSkipNextExceptionEvent() const {
    return GetFrameFlag(FrameFlags::kSkipNextExceptionEvent);
  }

  void SetSkipNextExceptionEvent(bool enable) {
    UpdateFrameFlag(enable, FrameFlags::kSkipNextExceptionEvent);
  }
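
  // Illustrative sketch of the flag accessors above, all of which route through the private
  // FrameFlags bit set. `frame` is a hypothetical placeholder:
  //
  //   frame->SetNotifyPop(true);
  //   CHECK(frame->NeedsNotifyPop());
  //   frame->SetNotifyPop(false);
  //   CHECK(!frame->NeedsNotifyPop());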

  void CheckConsistentVRegs() const {
    if (kIsDebugBuild) {
      // A shadow frame visible to GC requires the following rule: for a given vreg,
      // its vreg reference equivalent should be the same, or null.
      for (uint32_t i = 0; i < NumberOfVRegs(); ++i) {
        int32_t reference_value = References()[i].AsVRegValue();
        CHECK((GetVReg(i) == reference_value) || (reference_value == 0));
      }
    }
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, uint32_t dex_pc)
      : link_(link),
        method_(method),
        result_register_(nullptr),
        dex_pc_ptr_(nullptr),
        dex_instructions_(nullptr),
        number_of_vregs_(num_vregs),
        dex_pc_(dex_pc),
        cached_hotness_countdown_(0),
        hotness_countdown_(0),
        frame_flags_(0) {
    memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
  }

  void UpdateFrameFlag(bool enable, FrameFlags flag) {
    if (enable) {
      frame_flags_ |= static_cast<uint32_t>(flag);
    } else {
      frame_flags_ &= ~static_cast<uint32_t>(flag);
    }
  }

  bool GetFrameFlag(FrameFlags flag) const {
    return (frame_flags_ & static_cast<uint32_t>(flag)) != 0;
  }

  const StackReference<mirror::Object>* References() const {
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

  // Link to previous shadow frame or null.
  ShadowFrame* link_;
  ArtMethod* method_;
  JValue* result_register_;
  const uint16_t* dex_pc_ptr_;
  // Dex instruction base of the code item.
  const uint16_t* dex_instructions_;
  LockCountData lock_count_data_;  // This may contain GC roots when lock counting is active.
  const uint32_t number_of_vregs_;
  uint32_t dex_pc_;
  int16_t cached_hotness_countdown_;
  int16_t hotness_countdown_;

  // This is a set of ShadowFrame::FrameFlags which denote special states this frame is in.
  // NB alignment requires that this field takes 4 bytes no matter its size. Only 5 bits are
  // currently used.
  uint32_t frame_flags_;

  // This is a two-part array:
  //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
  //    bytes.
  //  - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
  //    ptr-sized.
  // In other words, when a primitive is stored in vX, the second (reference) part of the array
  // will be null. When a reference is stored in vX, the second (reference) part of the array will
  // be a copy of vX.
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

struct ShadowFrameDeleter {
  inline void operator()(ShadowFrame* frame) {
    if (frame != nullptr) {
      frame->~ShadowFrame();
    }
  }
};

}  // namespace art

#endif  // ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_