/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
#define ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/utils.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art {

class ArtMethod;

// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
 public:
  OatQuickMethodHeader(uint32_t code_info_offset = 0) {
    SetCodeInfoOffset(code_info_offset);
  }

  static OatQuickMethodHeader* NterpMethodHeader;

  bool IsNterpMethodHeader() const;

  static bool IsNterpPc(uintptr_t pc) {
    return OatQuickMethodHeader::NterpMethodHeader != nullptr &&
           OatQuickMethodHeader::NterpMethodHeader->Contains(pc);
  }

  static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
    uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
    uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
    DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeISA)) ||
           IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeISA)))
        << std::hex << code << " " << std::hex << header;
    return reinterpret_cast<OatQuickMethodHeader*>(header);
  }

  static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
    return FromCodePointer(EntryPointToCodePointer(entry_point));
  }

  static size_t InstructionAlignedSize() {
    return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetCodeAlignment(kRuntimeISA));
  }

  OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
  OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;

  uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
    return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
  }

  ALWAYS_INLINE bool IsOptimized() const {
    uintptr_t code = reinterpret_cast<uintptr_t>(code_);
    DCHECK_NE(data_, 0u) << std::hex << code;            // Probably a padding of native code.
    DCHECK_NE(data_, kInvalidData) << std::hex << code;  // Probably a stub or trampoline.
    return (data_ & kIsCodeInfoMask) != 0;
  }
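  // Note (added for clarity; it summarizes the mask constants declared below):
  // `data_` packs three fields. The top bit (kShouldDeoptimizeMask) records the
  // should-deoptimize flag, the next bit (kIsCodeInfoMask) records whether a
  // CodeInfo precedes the code, and the low 30 bits hold either the CodeInfo
  // offset (when kIsCodeInfoMask is set) or the code size in bytes (when it is
  // clear). For example, `data_ == (kIsCodeInfoMask | 0x20)` means the CodeInfo
  // starts 0x20 bytes before `code_`.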
  ALWAYS_INLINE const uint8_t* GetOptimizedCodeInfoPtr() const {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE uint8_t* GetOptimizedCodeInfoPtr() {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE const uint8_t* GetCode() const {
    return code_;
  }

  ALWAYS_INLINE uint32_t GetCodeSize() const {
    return LIKELY(IsOptimized())
        ? CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr())
        : (data_ & kCodeSizeMask);
  }

  ALWAYS_INLINE uint32_t GetCodeInfoOffset() const {
    DCHECK(IsOptimized());
    return data_ & kCodeInfoMask;
  }

  void SetCodeInfoOffset(uint32_t offset) {
    data_ = kIsCodeInfoMask | offset;
    DCHECK_EQ(GetCodeInfoOffset(), offset);
  }

  bool Contains(uintptr_t pc) const {
    // We should not call `Contains` on a stub or trampoline.
    DCHECK_NE(data_, kInvalidData) << std::hex << reinterpret_cast<uintptr_t>(code_);
    // Remove hwasan tag to make comparison below valid. The PC from the stack does not have it.
    uintptr_t code_start = reinterpret_cast<uintptr_t>(HWASanUntag(code_));
    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
    if (kRuntimeISA == InstructionSet::kArm) {
      // On Thumb-2, the pc is offset by one.
      code_start++;
    }
    return code_start <= pc && pc <= (code_start + GetCodeSize());
  }

  const uint8_t* GetEntryPoint() const {
    // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
    // (not `kThumb2`), *but* we always generate code for the Thumb-2
    // instruction set anyway. Thumb-2 requires the entrypoint to be of
    // offset 1.
    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
    return (kRuntimeISA == InstructionSet::kArm)
        ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
        : code_;
  }

  template <bool kCheckFrameSize = true>
  uint32_t GetFrameSizeInBytes() const {
    uint32_t result = GetFrameInfo().FrameSizeInBytes();
    if (kCheckFrameSize) {
      DCHECK_ALIGNED(result, kStackAlignment);
    }
    return result;
  }

  QuickMethodFrameInfo GetFrameInfo() const {
    DCHECK(IsOptimized());
    return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
  }

  size_t GetShouldDeoptimizeFlagOffset() const {
    DCHECK(IsOptimized());
    QuickMethodFrameInfo frame_info = GetFrameInfo();
    size_t frame_size = frame_info.FrameSizeInBytes();
    size_t core_spill_size =
        POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
    size_t fpu_spill_size =
        POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
    return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
  }
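  // Illustrative frame layout implied by GetShouldDeoptimizeFlagOffset() above
  // (a sketch only; the authoritative layout is defined by the compiler's
  // frame construction):
  //
  //   +--------------------------+  <- SP + frame size
  //   | core register spills     |  POPCOUNT(CoreSpillMask()) * GPR spill size
  //   | FP register spills       |  POPCOUNT(FpSpillMask()) * FPR spill size
  //   | should_deoptimize flag   |  kShouldDeoptimizeFlagSize bytes
  //   | locals, outgoing args... |
  //   +--------------------------+  <- SP
  //
  // The returned offset is relative to SP: frame size minus the spill areas
  // minus the flag itself.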
  // For non-catch handlers. Only used in test code.
  uintptr_t ToNativeQuickPc(ArtMethod* method,
                            const uint32_t dex_pc,
                            bool abort_on_failure = true) const;

  // For catch handlers.
  uintptr_t ToNativeQuickPcForCatchHandlers(ArtMethod* method,
                                            ArrayRef<const uint32_t> dex_pc_list,
                                            /* out */ uint32_t* stack_map_row,
                                            bool abort_on_failure = true) const;

  uint32_t ToDexPc(ArtMethod** frame,
                   const uintptr_t pc,
                   bool abort_on_failure = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetHasShouldDeoptimizeFlag() {
    DCHECK(!HasShouldDeoptimizeFlag());
    data_ |= kShouldDeoptimizeMask;
  }

  bool HasShouldDeoptimizeFlag() const {
    return (data_ & kShouldDeoptimizeMask) != 0;
  }

 private:
  static constexpr uint32_t kShouldDeoptimizeMask = 0x80000000;
  static constexpr uint32_t kIsCodeInfoMask = 0x40000000;
  static constexpr uint32_t kCodeInfoMask = 0x3FFFFFFF;  // If kIsCodeInfoMask is set.
  static constexpr uint32_t kCodeSizeMask = 0x3FFFFFFF;  // If kIsCodeInfoMask is clear.

  // In order to not confuse a stub with Java-generated code, we prefix each
  // stub with a 0xFFFFFFFF marker.
  static constexpr uint32_t kInvalidData = 0xFFFFFFFF;

  uint32_t data_ = 0u;  // Combination of fields using the above masks.
  uint8_t code_[0];     // The actual method code.
};

}  // namespace art

#endif  // ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
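// Usage sketch (illustration only; not part of this header). Assumes
// hypothetical `method` (an ArtMethod* with compiled code) and `pc` (a
// uintptr_t known to lie within that code):
//
//   const void* entry = method->GetEntryPointFromQuickCompiledCode();
//   OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(entry);
//   DCHECK(header->Contains(pc));
//   uint32_t code_size = header->GetCodeSize();
//   uint32_t dex_pc = header->ToDexPc(&method, pc);  // Requires the mutator lock.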