/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_
#define ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_

#include <optional>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/utils.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

29 namespace art HIDDEN {
30 
31 class ArtMethod;
32 
33 // Size in bytes of the should_deoptimize flag on stack.
34 // We just need 4 bytes for our purpose regardless of the architecture. Frame size
35 // calculation will automatically do alignment for the final frame size.
36 static constexpr size_t kShouldDeoptimizeFlagSize = 4;
37 
38 // OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
39 class PACKED(4) OatQuickMethodHeader {
40  public:
41   OatQuickMethodHeader(uint32_t code_info_offset = 0) {
42     SetCodeInfoOffset(code_info_offset);
43   }
44 
45   static OatQuickMethodHeader* NterpMethodHeader;
46   EXPORT static ArrayRef<const uint8_t> NterpWithClinitImpl;
47   EXPORT static ArrayRef<const uint8_t> NterpImpl;
48 
49   EXPORT bool IsNterpMethodHeader() const;
50 
IsNterpPc(uintptr_t pc)51   static bool IsNterpPc(uintptr_t pc) {
52     return OatQuickMethodHeader::NterpMethodHeader != nullptr &&
53         OatQuickMethodHeader::NterpMethodHeader->Contains(pc);
54   }
55 
FromCodePointer(const void * code_ptr)56   static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
57     uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
58     uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
59     DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeISA)) ||
60            IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeISA)))
61         << std::hex << code << " " << std::hex << header;
62     return reinterpret_cast<OatQuickMethodHeader*>(header);
63   }
64 
FromEntryPoint(const void * entry_point)65   static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
66     return FromCodePointer(EntryPointToCodePointer(entry_point));
67   }
68 
InstructionAlignedSize()69   static size_t InstructionAlignedSize() {
70     return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetCodeAlignment(kRuntimeISA));
71   }
72 
73   OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
74   OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
75 
NativeQuickPcOffset(const uintptr_t pc)76   uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
77     return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
78   }
79 
80   // Check if this is hard-written assembly (i.e. inside libart.so).
81   // Returns std::nullop on Mac.
82   static std::optional<bool> IsStub(const uint8_t* pc);
83 
IsOptimized()84   ALWAYS_INLINE bool IsOptimized() const {
85     if (code_ == NterpWithClinitImpl.data() || code_ == NterpImpl.data()) {
86       DCHECK(IsStub(code_).value_or(true));
87       return false;
88     }
89     DCHECK(!IsStub(code_).value_or(false));
90     return true;
91   }
92 
GetOptimizedCodeInfoPtr()93   ALWAYS_INLINE const uint8_t* GetOptimizedCodeInfoPtr() const {
94     uint32_t offset = GetCodeInfoOffset();
95     DCHECK_NE(offset, 0u);
96     return code_ - offset;
97   }
98 
GetOptimizedCodeInfoPtr()99   ALWAYS_INLINE uint8_t* GetOptimizedCodeInfoPtr() {
100     uint32_t offset = GetCodeInfoOffset();
101     DCHECK_NE(offset, 0u);
102     return code_ - offset;
103   }
104 
GetCode()105   ALWAYS_INLINE const uint8_t* GetCode() const {
106     return code_;
107   }
108 
GetCodeSize()109   ALWAYS_INLINE uint32_t GetCodeSize() const {
110     if (code_ == NterpWithClinitImpl.data()) {
111       return NterpWithClinitImpl.size();
112     }
113     if (code_ == NterpImpl.data()) {
114       return NterpImpl.size();
115     }
116     return CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr());
117   }
118 
GetCodeInfoOffset()119   ALWAYS_INLINE uint32_t GetCodeInfoOffset() const {
120     DCHECK(IsOptimized());
121     return code_info_offset_;
122   }
123 
SetCodeInfoOffset(uint32_t offset)124   void SetCodeInfoOffset(uint32_t offset) { code_info_offset_ = offset; }
125 
Contains(uintptr_t pc)126   bool Contains(uintptr_t pc) const {
127     uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
128 // Let's not make assumptions about other architectures.
129 #if defined(__aarch64__) || defined(__riscv__) || defined(__riscv)
130     // Verify that the code pointer is not tagged. Memory for code gets allocated with
131     // mspace_memalign or memory mapped from a file, neither of which is tagged by MTE/HWASan.
132     DCHECK_EQ(code_start, reinterpret_cast<uintptr_t>(code_start) & ((UINT64_C(1) << 56) - 1));
133 #endif
134     static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
135     if (kRuntimeISA == InstructionSet::kArm) {
136       // On Thumb-2, the pc is offset by one.
137       code_start++;
138     }
139     return code_start <= pc && pc <= (code_start + GetCodeSize());
140   }
141 
GetEntryPoint()142   const uint8_t* GetEntryPoint() const {
143     // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
144     // (not `kThumb2`), *but* we always generate code for the Thumb-2
145     // instruction set anyway. Thumb-2 requires the entrypoint to be of
146     // offset 1.
147     static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
148     return (kRuntimeISA == InstructionSet::kArm)
149         ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
150         : code_;
151   }
152 
153   template <bool kCheckFrameSize = true>
GetFrameSizeInBytes()154   uint32_t GetFrameSizeInBytes() const {
155     uint32_t result = GetFrameInfo().FrameSizeInBytes();
156     if (kCheckFrameSize) {
157       DCHECK_ALIGNED(result, kStackAlignment);
158     }
159     return result;
160   }
161 
GetFrameInfo()162   QuickMethodFrameInfo GetFrameInfo() const {
163     DCHECK(IsOptimized());
164     return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
165   }
166 
GetShouldDeoptimizeFlagOffset()167   size_t GetShouldDeoptimizeFlagOffset() const {
168     DCHECK(IsOptimized());
169     QuickMethodFrameInfo frame_info = GetFrameInfo();
170     size_t frame_size = frame_info.FrameSizeInBytes();
171     size_t core_spill_size =
172         POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
173     size_t fpu_spill_size =
174         POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
175     return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
176   }
177 
178   // For non-catch handlers. Only used in test code.
179   EXPORT uintptr_t ToNativeQuickPc(ArtMethod* method,
180                                    const uint32_t dex_pc,
181                                    bool abort_on_failure = true) const;
182 
183   // For catch handlers.
184   uintptr_t ToNativeQuickPcForCatchHandlers(ArtMethod* method,
185                                             ArrayRef<const uint32_t> dex_pc_list,
186                                             /* out */ uint32_t* stack_map_row,
187                                             bool abort_on_failure = true) const;
188 
189   uint32_t ToDexPc(ArtMethod** frame,
190                    const uintptr_t pc,
191                    bool abort_on_failure = true) const
192       REQUIRES_SHARED(Locks::mutator_lock_);
193 
HasShouldDeoptimizeFlag()194   bool HasShouldDeoptimizeFlag() const {
195     return IsOptimized() && CodeInfo::HasShouldDeoptimizeFlag(GetOptimizedCodeInfoPtr());
196   }
197 
198  private:
199   uint32_t code_info_offset_ = 0u;
200   uint8_t code_[0];     // The actual method code.
201 };
202 
203 }  // namespace art

#endif  // ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_