/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_

#include <stdint.h>
#include <memory>
#include <vector>

#include "base/arena_containers.h"
#include "base/logging.h"
#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"

// TODO: make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wmissing-noreturn"
#include "vixl/a64/macro-assembler-a64.h"
#include "vixl/a64/disasm-a64.h"
#pragma GCC diagnostic pop

namespace art {
namespace arm64 {

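// Convenience macro for building a vixl::MemOperand from its constructor arguments.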
#define MEM_OP(...)      vixl::MemOperand(__VA_ARGS__)

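// Kinds of memory loads, distinguished by size and signedness of the loaded value.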
enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadCoreWord,
  kLoadSWord,
  kLoadDWord
};

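// Kinds of memory stores, distinguished by the size of the stored value.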
enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreCoreWord,
  kStoreSWord,
  kStoreDWord
};

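// Bookkeeping for a single exception slow path: the scratch register and stack
// adjustment captured by ExceptionPoll(), plus the label of the slow-path entry.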
class Arm64Exception {
 private:
  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {}

  vixl::Label* Entry() { return &exception_entry_; }

  // Register used for passing Thread::Current()->exception_.
  const Arm64ManagedRegister scratch_;

  // Stack adjust for ExceptionPoll.
  const size_t stack_adjust_;

  vixl::Label exception_entry_;

  friend class Arm64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
};

class Arm64Assembler FINAL : public Assembler {
 public:
  // We indicate the size of the initial code generation buffer to the VIXL
  // assembler. From there it will automatically manage the buffer.
  explicit Arm64Assembler(ArenaAllocator* arena)
      : Assembler(arena),
        exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
        vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}

  virtual ~Arm64Assembler() {
    delete vixl_masm_;
  }

  // Finalize the code.
  void FinalizeCode() OVERRIDE;

  // Size of generated code.
  size_t CodeSize() const OVERRIDE;
  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region);

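  // Spill/unspill the registers in `registers` to/from the stack frame at `offset`.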
  void SpillRegisters(vixl::CPURegList registers, int offset);
  void UnspillRegisters(vixl::CPURegList registers, int offset);

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
      OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
      OVERRIDE;
  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) OVERRIDE;
  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                     ManagedRegister scratch) OVERRIDE;

  // Load routines.
  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
               bool unpoison_reference) OVERRIDE;
  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;

  // Copying routines.
  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                              ManagedRegister scratch) OVERRIDE;
  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
      OVERRIDE;
  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;

  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;

  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                       ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                       ManagedRegister scratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**); load this into dst.
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;

  // Jump to address (not setting link register).
  void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl::Register reg);

  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
  }

 private:
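  // Map an ART XRegister code to the corresponding VIXL register; SP and XZR
  // are handled explicitly.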
  static vixl::Register reg_x(int code) {
    CHECK(code < kNumberOfXRegisters) << code;
    if (code == SP) {
      return vixl::sp;
    } else if (code == XZR) {
      return vixl::xzr;
    }
    return vixl::Register::XRegFromCode(code);
  }

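  // Same as reg_x(), but for the 32-bit W view of the register.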
  static vixl::Register reg_w(int code) {
    CHECK(code < kNumberOfWRegisters) << code;
    if (code == WSP) {
      return vixl::wsp;
    } else if (code == WZR) {
      return vixl::wzr;
    }
    return vixl::Register::WRegFromCode(code);
  }

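  // Map an ART FP register code to the 64-bit (D) VIXL FP register.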
  static vixl::FPRegister reg_d(int code) {
    return vixl::FPRegister::DRegFromCode(code);
  }

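  // Map an ART FP register code to the 32-bit (S) VIXL FP register.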
  static vixl::FPRegister reg_s(int code) {
    return vixl::FPRegister::SRegFromCode(code);
  }

  // Emits Exception block.
  void EmitExceptionPoll(Arm64Exception* exception);

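  // Helpers that emit a store of a W/X/S/D register to [base + offset].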
  void StoreWToOffset(StoreOperandType type, WRegister source,
                      XRegister base, int32_t offset);
  void StoreToOffset(XRegister source, XRegister base, int32_t offset);
  void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
  void StoreDToOffset(DRegister source, XRegister base, int32_t offset);

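  // Helpers that load into a register from [base + offset], materialize
  // immediate values, and add constants to registers.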
  void LoadImmediate(XRegister dest, int32_t value, vixl::Condition cond = vixl::al);
  void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
  void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       XRegister base, int32_t offset);
  void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
  void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
  void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
  void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
  void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);

  // List of exception blocks to generate at the end of the code cache.
  ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;

 public:
  // Vixl assembler.
  vixl::MacroAssembler* const vixl_masm_;

  // Used for testing.
  friend class Arm64ManagedRegister_VixlRegisters_Test;
};

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_