/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_
#define ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_

#include <stdint.h>

#include <cstring>
#include <memory>
#include <string>

#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "lock_count_data.h"
#include "read_barrier.h"
#include "stack_reference.h"
#include "verify_object.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class ShadowFrame;
class Thread;
union JValue;

// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;

// ShadowFrame has 2 possible layouts:
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
                                             ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
  }

  // Delete a ShadowFrame allocated on the heap for deoptimization.
  static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
    sf->~ShadowFrame();  // Explicitly destruct.
    uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
    delete[] memory;
  }

  // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
  // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({                              \
    size_t frame_size = ShadowFrame::ComputeSize(num_vregs);                                 \
    void* alloca_mem = alloca(frame_size);                                                   \
    ShadowFrameAllocaUniquePtr(                                                              \
        ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc),          \
                                           (alloca_mem)));                                   \
    })
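  // A minimal usage sketch of the macro above (illustrative only, not part of this header; the
  // variable names num_regs, caller_frame, callee, and pc are hypothetical):
  //
  //   ShadowFrameAllocaUniquePtr frame_ptr =
  //       CREATE_SHADOW_FRAME(num_regs, caller_frame, callee, pc);
  //   ShadowFrame* frame = frame_ptr.get();
  //   // ShadowFrameDeleter only runs ~ShadowFrame(); the alloca storage is reclaimed when the
  //   // caller returns, which is why the macro must expand in the caller's context.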
  ~ShadowFrame() {}

  // TODO(iam): Clean up the references array since it's always there;
  // we don't need to do conditionals.
  bool HasReferenceArray() const {
    return true;
  }

  uint32_t NumberOfVRegs() const {
    return number_of_vregs_;
  }

  uint32_t GetDexPC() const {
    return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
  }

  int16_t GetCachedHotnessCountdown() const {
    return cached_hotness_countdown_;
  }

  void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
    cached_hotness_countdown_ = cached_hotness_countdown;
  }

  int16_t GetHotnessCountdown() const {
    return hotness_countdown_;
  }

  void SetHotnessCountdown(int16_t hotness_countdown) {
    hotness_countdown_ = hotness_countdown;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
    dex_pc_ptr_ = nullptr;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  // Shorts are extended to ints in VRegs. Interpreter intrinsics need them as shorts.
  int16_t GetVRegShort(size_t i) const {
    return static_cast<int16_t>(GetVReg(i));
  }

  uint32_t* GetVRegAddr(size_t i) {
    return &vregs_[i];
  }

  uint32_t* GetShadowRefAddr(size_t i) {
    DCHECK(HasReferenceArray());
    DCHECK_LT(i, NumberOfVRegs());
    return &vregs_[i + NumberOfVRegs()];
  }

  void SetCodeItem(const DexFile::CodeItem* code_item) {
    code_item_ = code_item;
  }

  const DexFile::CodeItem* GetCodeItem() const {
    return code_item_;
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }
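  // A short illustration of the wide accessors above (illustrative only, not part of this
  // header): a long or double occupies the vreg pair (i, i + 1), and the aligned(4) typedefs let
  // the 8-byte access work even though vregs_ only guarantees 4-byte alignment.
  //
  //   // Hypothetical frame with at least two vregs:
  //   frame->SetVRegLong(0, some_long);       // Writes the pair (v0, v1).
  //   int64_t wide = frame->GetVRegLong(0);   // Reads the same pair back as one 64-bit value.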
  // Look up the reference given its virtual register number.
  // A non-null result does not guarantee that the vreg currently holds a reference on
  // non-moving collectors. Check that the raw reg from GetVReg is equal to this if not certain.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(ref);
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }
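  // A short illustration of the two-part bookkeeping above (illustrative only, not part of this
  // header): writing a reference mirrors it into the reference array, while writing a primitive
  // clears the corresponding reference slot so a moving collector never treats stale bits as a
  // root.
  //
  //   frame->SetVRegReference(2, obj);   // vregs_[2] and References()[2] both refer to obj.
  //   frame->SetVReg(2, 42);             // vregs_[2] is now 42; References()[2] is cleared.
  //   mirror::Object* ref = frame->GetVRegReference(2);  // Returns null, not a stale pointer.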
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
    DCHECK(method != nullptr);
    DCHECK(method_ != nullptr);
    method_ = method;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  LockCountData& GetLockCountData() {
    return lock_count_data_;
  }

  static size_t LockCountDataOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

  static size_t ResultRegisterOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, result_register_);
  }

  static size_t DexPCPtrOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
  }

  static size_t CodeItemOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, code_item_);
  }

  static size_t CachedHotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
  }

  static size_t HotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
  }
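  // A short illustration of the *Offset() accessors above (illustrative only, not part of this
  // header): code outside this class can address fields relative to a raw ShadowFrame pointer,
  // e.g. when field offsets are baked into generated code.
  //
  //   uint8_t* base = reinterpret_cast<uint8_t*>(frame);
  //   auto* dex_pc_slot = reinterpret_cast<uint32_t*>(base + ShadowFrame::DexPCOffset());
  //   uint32_t raw_dex_pc = *dex_pc_slot;  // The same storage SetDexPC() writes to.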
  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
                                            ShadowFrame* link,
                                            ArtMethod* method,
                                            uint32_t dex_pc,
                                            void* memory) {
    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
  }

  const uint16_t* GetDexPCPtr() {
    return dex_pc_ptr_;
  }

  void SetDexPCPtr(uint16_t* dex_pc_ptr) {
    dex_pc_ptr_ = dex_pc_ptr;
  }

  JValue* GetResultRegister() {
    return result_register_;
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : link_(link),
        method_(method),
        result_register_(nullptr),
        dex_pc_ptr_(nullptr),
        code_item_(nullptr),
        number_of_vregs_(num_vregs),
        dex_pc_(dex_pc),
        cached_hotness_countdown_(0),
        hotness_countdown_(0) {
    // TODO(iam): Remove this parameter, it's an artifact of portable removal.
    DCHECK(has_reference_array);
    if (has_reference_array) {
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

  // Link to previous shadow frame or null.
  ShadowFrame* link_;
  ArtMethod* method_;
  JValue* result_register_;
  const uint16_t* dex_pc_ptr_;
  const DexFile::CodeItem* code_item_;
  LockCountData lock_count_data_;  // This may contain GC roots when lock counting is active.
  const uint32_t number_of_vregs_;
  uint32_t dex_pc_;
  int16_t cached_hotness_countdown_;
  int16_t hotness_countdown_;

  // This is a two-part array:
  //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
  //    bytes.
  //  - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
  //    ptr-sized.
  // In other words when a primitive is stored in vX, the second (reference) part of the array will
  // be null. When a reference is stored in vX, the second (reference) part of the array will be a
  // copy of vX.
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

struct ShadowFrameDeleter {
  inline void operator()(ShadowFrame* frame) {
    if (frame != nullptr) {
      frame->~ShadowFrame();
    }
  }
};

}  // namespace art

#endif  // ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_