/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_

#include <android-base/logging.h>

#include "base/arena_containers.h"
#include "base/macros.h"
#include "constants_arm.h"
#include "dwarf/register.h"
#include "offsets.h"
#include "utils/arm/assembler_arm_shared.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"

// TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop

namespace vixl32 = vixl::aarch32;

namespace art {
namespace arm {

inline dwarf::Reg DWARFReg(vixl32::Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
}

inline dwarf::Reg DWARFReg(vixl32::SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
}

class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in more efficient alloc/realloc
  // behavior and fewer system calls than a larger default capacity.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and both rn and label can be encoded in
  // Cbz/Cbnz, then Cbz/Cbnz is generated.
  // Prefer these interfaces over vixl32::MacroAssembler::Cbz/Cbnz.
  // In T32, Cbz/Cbnz instructions have the following limitations:
  // - Far targets, which are more than 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);
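  //
  // Usage sketch (illustrative only, not part of the original API; `masm` is a
  // hypothetical ArmVIXLMacroAssembler): with a low register and a nearby
  // forward label, passing is_far_target = false lets the macro emit a single
  // Cbz instead of CMP+Bcc:
  //
  //   vixl32::Label done;
  //   masm.CompareAndBranchIfZero(vixl32::r0, &done, /* is_far_target= */ false);
  //   ...  // fewer than 126 bytes of forward code
  //   masm.Bind(&done);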

  // In T32, some instructions (add, mov, etc.) outside an IT block
  // have only 32-bit encodings, but there are 16-bit flag-setting
  // versions of these instructions (adds, movs, etc.). In most cases
  // in ART we don't care whether an instruction sets the flags, so we
  // can benefit from the smaller code size.
  // VIXL never generates the flag-setting versions (for example, adds
  // for the Add macro instruction) unless the vixl32::DontCare option is
  // explicitly specified. That's why we introduce wrappers that use the
  // DontCare option by default.
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP
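
  // Illustrative note (not part of the original header): with the wrappers
  // above, a plain call such as
  //
  //   masm.And(vixl32::r0, vixl32::r0, vixl32::r1);
  //
  // routes through the vixl32::DontCare overload, so VIXL may pick the 16-bit
  // flag-setting encoding (ANDS) when the operands allow it.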

  // The following two functions don't fall into the above categories; overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use the 16-bit T2 encoding of the B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try the Narrow encoding, because the 16-bit T2 encoding supports
  // jumps within a 2KB range. For B(cond, label), the supported branch range is only 256
  // bytes, so we use the is_far_target hint to decide whether to try the 16-bit T1 encoding
  // for short-range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);
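  //
  // Usage sketch (illustrative only; `masm` is a hypothetical
  // ArmVIXLMacroAssembler): for a conditional branch known to stay within the
  // 256-byte short range, the hint can be passed as false:
  //
  //   vixl32::Label skip;
  //   masm.B(vixl32::eq, &skip, /* is_far_target= */ false);
  //   ...  // short code sequence
  //   masm.Bind(&skip);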

  // Use a literal to generate a double constant if it does not fit the VMOV immediate encoding.
  void Vmov(vixl32::DRegister rd, double imm) {
    if (vixl::VFP::IsImmFP64(imm)) {
      MacroAssembler::Vmov(rd, imm);
    } else {
      MacroAssembler::Vldr(rd, imm);
    }
  }
  using MacroAssembler::Vmov;
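  //
  // For example (illustrative only): 1.0 is representable as a VFP modified
  // immediate, so Vmov(d0, 1.0) emits a single VMOV; a value such as 0.1 is
  // not representable and is instead loaded from a literal pool via Vldr.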
};

class ArmVIXLAssembler final : public Assembler {
 private:
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* allocator)
      : Assembler(allocator) {
    // Use the Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() override;

  // Size of generated code.
  size_t CodeSize() const override;
  const uint8_t* CodeBufferBaseAddress() const override;

  // Copy instructions out of the assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region) override;

  void Bind(Label* label ATTRIBUTE_UNUSED) override {
    UNIMPLEMENTED(FATAL) << "Do not use Bind(Label*) for ARM";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) override {
    UNIMPLEMENTED(FATAL) << "Do not use Jump(Label*) for ARM";
  }

  void Bind(vixl::aarch32::Label* label) {
    vixl_masm_.Bind(label);
  }
  void Jump(vixl::aarch32::Label* label) {
    vixl_masm_.B(label);
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Emit code checking the status of the Marking Register, and aborting
  // the program if MR does not match the value stored in the art::Thread
  // object.
  //
  // Argument `temp` is used as a temporary register to generate code.
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck and is passed to the BKPT instruction.
  void GenerateMarkingRegisterCheck(vixl32::Register temp, int code = 0);
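  //
  // Illustrative call (hypothetical temp register and code value, not from
  // the original header):
  //
  //   assembler.GenerateMarkingRegisterCheck(vixl32::r12, /* code= */ 17);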

  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  void LoadImmediate(vixl32::Register dest, int32_t value);
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

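  // Illustrative usage of the load/store helpers (hypothetical registers and
  // offsets, assuming the kStoreWord/kLoadWord operand types declared in
  // constants_arm.h):
  //
  //   // Spill r0 as a word to [sp, #16] and reload it later.
  //   assembler.StoreToOffset(kStoreWord, vixl32::r0, vixl32::sp, 16);
  //   assembler.LoadFromOffset(kLoadWord, vixl32::r0, vixl32::sp, 16);
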
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode,
                             uint32_t immediate,
                             vixl::aarch32::FlagsUpdate update_flags = vixl::aarch32::DontCare);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};

// Thread register declaration.
extern const vixl32::Register tr;
// Marking register declaration.
extern const vixl32::Register mr;

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_