/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_

#include "base/arena_containers.h"
#include "base/logging.h"
#include "constants_arm.h"
#include "offsets.h"
#include "utils/arm/assembler_arm_shared.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"

// TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop

namespace vixl32 = vixl::aarch32;

namespace art {
namespace arm {

class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in cheaper alloc/realloc and
  // fewer system calls than a larger default capacity would.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and both rn and label can be encoded in
  // Cbz/Cbnz, then Cbz/Cbnz is generated.
  // Prefer the following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz directly.
  // In T32, Cbz/Cbnz instructions have the following limitations:
  // - Far targets, which are over 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);

  // In T32, some of the instructions (add, mov, etc.) outside an IT block
  // have only 32-bit encodings. But there are 16-bit flag-setting
  // versions of these instructions (adds, movs, etc.). In most cases
  // in ART we don't care whether the instructions set flags or not;
  // thus we can benefit from the smaller code size.
  // VIXL will never generate a flag-setting version (for example, adds
  // for the Add macro instruction) unless the vixl32::DontCare option is
  // explicitly specified. That's why we introduce these wrappers, which use
  // the DontCare option by default.
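  //
  // As a hedged illustration (an assumed expansion, not part of this file):
  // with the wrappers below, writing
  //
  //   masm.Orr(r0, r0, r1);
  //
  // passes vixl32::DontCare as the FlagsUpdate argument, so outside an IT
  // block VIXL is free to pick the 16-bit "orrs r0, r1" encoding instead of
  // the 32-bit "orr.w r0, r0, r1", trading flag preservation (which we do
  // not rely on here) for smaller code.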
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP

  // The following two functions don't fall into the above categories. Overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use a 16-bit encoding of the B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try to use the narrow encoding, because the 16-bit T2 encoding
  // supports jumping within a 2KB range. For B(cond, label), the supported branch range of the
  // 16-bit T1 encoding is only 256 bytes, so we use the is_far_target hint to decide whether to
  // try it for short-range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);

  // Use a literal to generate a double constant if it does not fit the VMOV immediate encoding.
  void Vmov(vixl32::DRegister rd, double imm) {
    if (vixl::VFP::IsImmFP64(imm)) {
      MacroAssembler::Vmov(rd, imm);
    } else {
      MacroAssembler::Vldr(rd, imm);
    }
  }
  using MacroAssembler::Vmov;
};
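
// A hypothetical usage sketch of the wrappers above (the register, label, and
// variable names are illustrative only, not taken from this file):
//
//   ArmVIXLMacroAssembler masm;
//   vixl32::Label done;
//   // With is_far_target = false, a 16-bit CBZ can be emitted when r0 is a
//   // low register and `done` is a near forward target; otherwise this falls
//   // back to CMP+Bcc.
//   masm.CompareAndBranchIfZero(vixl32::r0, &done, /* is_far_target */ false);
//   masm.Mov(vixl32::r0, 1);  // Emitted with DontCare; may pick a flag-setting 16-bit encoding.
//   masm.Bind(&done);
//   masm.FinalizeCode();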
class ArmVIXLAssembler FINAL : public Assembler {
 private:
  class ArmException;

 public:
  explicit ArmVIXLAssembler(ArenaAllocator* arena)
      : Assembler(arena) {
    // Use the Thumb2 (T32) instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}

  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }

  void FinalizeCode() OVERRIDE;

  // Size of generated code.
  size_t CodeSize() const OVERRIDE;
  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;

  // Copy instructions out of the assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;

  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  void LoadImmediate(vixl32::Register dest, int32_t value);
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc = kCcDontCare);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
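
// A hypothetical sketch of pool-managed literal creation (the names `arena`
// and `literal` are illustrative; `arena` is assumed to be a valid
// ArenaAllocator*):
//
//   ArmVIXLAssembler assembler(arena);
//   vixl::aarch32::Literal<int32_t>* literal =
//       assembler.CreateLiteralDestroyedWithPool<int32_t>(0x12345678);
//   // Load the constant via a PC-relative LDR. The literal is placed when
//   // the pool is emitted and deleted when the pool is destroyed.
//   assembler.GetVIXLAssembler()->Ldr(vixl32::r0, literal);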
// Thread register declaration.
extern const vixl32::Register tr;
// Marking register declaration.
extern const vixl32::Register mr;

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_