/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_

#include "base/bit_field.h"
#include "base/macros.h"
#include "class_root.h"
#include "code_generator.h"
#include "common_arm64.h"
#include "dex/dex_file_types.h"
#include "dex/string_reference.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "jit_patches_arm64.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile cleanly with -Wshadow, -Wdeprecated-declarations.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop

namespace art HIDDEN {

namespace linker {
class Arm64RelativePatcherTest;
}  // namespace linker

namespace arm64 {

class CodeGeneratorARM64;

// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);

// This constant is used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
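
// For example (a sketch, assuming VIXL's EmissionCheckScope API): a sequence that must not be
// interleaved with pool emission is typically wrapped as
//   EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
//   __ ldar(...);  // No veneer/literal pool can be emitted inside the scope.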

// A reference load (other than an object array load) uses LDR Wt, [Xn, #offset], which can
// encode offsets below 16KiB. For offsets >= 16KiB, the load must be emitted as two or more
// instructions. For the Baker read barrier implementation using link-time generated thunks we
// need to split the offset explicitly.
static constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
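
// Illustrative sketch (not the actual emitted thunk): a reference load at offset 0x5010 would be
// split roughly as
//   add temp, xN, #0x4000    // offset & ~(kReferenceLoadMinFarOffset - 1)
//   ldr wT, [temp, #0x1010]  // offset & (kReferenceLoadMinFarOffset - 1)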

static const vixl::aarch64::Register kParameterCoreRegisters[] = {
    vixl::aarch64::x1,
    vixl::aarch64::x2,
    vixl::aarch64::x3,
    vixl::aarch64::x4,
    vixl::aarch64::x5,
    vixl::aarch64::x6,
    vixl::aarch64::x7
};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static const vixl::aarch64::VRegister kParameterFPRegisters[] = {
    vixl::aarch64::d0,
    vixl::aarch64::d1,
    vixl::aarch64::d2,
    vixl::aarch64::d3,
    vixl::aarch64::d4,
    vixl::aarch64::d5,
    vixl::aarch64::d6,
    vixl::aarch64::d7
};
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);

// Thread Register.
const vixl::aarch64::Register tr = vixl::aarch64::x19;
// Marking Register.
const vixl::aarch64::Register mr = vixl::aarch64::x20;
// Implicit suspend check register.
const vixl::aarch64::Register kImplicitSuspendCheckRegister = vixl::aarch64::x21;
// Method register on invoke.
static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0;
const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0,
                                                             vixl::aarch64::ip1);
const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31);

const vixl::aarch64::CPURegList runtime_reserved_core_registers =
    vixl::aarch64::CPURegList(
        tr,
        // Reserve X20 as Marking Register when emitting Baker read barriers.
        // TODO: We don't need to reserve marking-register for userfaultfd GC. But
        // that would require some work in the assembler code as the right GC is
        // chosen at load-time and not compile time.
        (kReserveMarkingRegister ? mr : vixl::aarch64::NoCPUReg),
        kImplicitSuspendCheckRegister,
        vixl::aarch64::lr);

// Some instructions have special requirements for a temporary, for example
// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
// temp that's not an R0 (to avoid an extra move) and Baker read barrier field
// loads with large offsets need a fixed register to limit the number of link-time
// thunks we generate. For these and similar cases, we want to reserve a specific
// register that's neither callee-save nor an argument register. We choose x15.
inline Location FixedTempLocation() {
  return Location::RegisterLocation(vixl::aarch64::x15.GetCode());
}

// Callee-saved registers per AAPCS64, excluding x19 (Thread Register) and,
// when emitting Baker read barriers, x20 (Marking Register).
const vixl::aarch64::CPURegList callee_saved_core_registers(
    vixl::aarch64::CPURegister::kRegister,
    vixl::aarch64::kXRegSize,
    (kReserveMarkingRegister ? vixl::aarch64::x21.GetCode() : vixl::aarch64::x20.GetCode()),
    vixl::aarch64::x30.GetCode());
const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                          vixl::aarch64::kDRegSize,
                                                          vixl::aarch64::d8.GetCode(),
                                                          vixl::aarch64::d15.GetCode());
Location ARM64ReturnLocation(DataType::Type return_type);

vixl::aarch64::Condition ARM64PCondition(HVecPredToBoolean::PCondKind cond);

#define UNIMPLEMENTED_INTRINSIC_LIST_ARM64(V) \
  V(MathSignumFloat)                          \
  V(MathSignumDouble)                         \
  V(MathCopySignFloat)                        \
  V(MathCopySignDouble)                       \
  V(IntegerRemainderUnsigned)                 \
  V(LongRemainderUnsigned)                    \
  V(StringStringIndexOf)                      \
  V(StringStringIndexOfAfter)                 \
  V(StringBufferAppend)                       \
  V(StringBufferLength)                       \
  V(StringBufferToString)                     \
  V(StringBuilderAppendObject)                \
  V(StringBuilderAppendString)                \
  V(StringBuilderAppendCharSequence)          \
  V(StringBuilderAppendCharArray)             \
  V(StringBuilderAppendBoolean)               \
  V(StringBuilderAppendChar)                  \
  V(StringBuilderAppendInt)                   \
  V(StringBuilderAppendLong)                  \
  V(StringBuilderAppendFloat)                 \
  V(StringBuilderAppendDouble)                \
  V(StringBuilderLength)                      \
  V(StringBuilderToString)                    \
  V(SystemArrayCopyByte)                      \
  V(SystemArrayCopyInt)                       \
  V(UnsafeArrayBaseOffset)                    \
  /* 1.8 */                                   \
  V(MethodHandleInvoke)                       \
  /* OpenJDK 11 */                            \
  V(JdkUnsafeArrayBaseOffset)
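
// The list above is an X-macro: a client defines V and expands the list. A hypothetical use
// (the macro name below is illustrative only):
//   #define MARK_UNIMPLEMENTED(Name) /* handle the unimplemented intrinsic Name */
//   UNIMPLEMENTED_INTRINSIC_LIST_ARM64(MARK_UNIMPLEMENTED)
//   #undef MARK_UNIMPLEMENTED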

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  explicit SlowPathCodeARM64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;

 private:
  vixl::aarch64::Label entry_label_;
  vixl::aarch64::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
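
// Typical use of a SlowPathCodeARM64 subclass (a rough sketch, assuming the usual `__` assembler
// shorthand): the fast path branches to the entry label and the out-of-line code jumps back to
// the exit label, e.g.
//   __ B(ne, slow_path->GetEntryLabel());
//   __ Bind(slow_path->GetExitLabel());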

class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  using VIXLInt32Literal = vixl::aarch64::Literal<int32_t>;

  JumpTableARM64(HPackedSwitch* switch_instr, ArenaAllocator* allocator)
      : switch_instr_(switch_instr),
        table_start_(),
        jump_targets_(allocator->Adapter(kArenaAllocCodeGenerator)) {
      uint32_t num_entries = switch_instr_->GetNumEntries();
      jump_targets_.reserve(num_entries);
      for (uint32_t i = 0; i < num_entries; i++) {
        VIXLInt32Literal* lit = new VIXLInt32Literal(0);
        jump_targets_.emplace_back(lit);
      }
    }

  vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; }

  // Emits the jump table into the code buffer; jump target offsets are not yet known.
  void EmitTable(CodeGeneratorARM64* codegen);

  // Updates the offsets in the jump table; to be used once the addresses of the jump
  // targets' basic blocks are resolved.
  void FixTable(CodeGeneratorARM64* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch64::Label table_start_;

  // Contains literals for the switch's jump targets.
  ArenaVector<std::unique_ptr<VIXLInt32Literal>> jump_targets_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARM64);
};

static const vixl::aarch64::Register kRuntimeParameterCoreRegisters[] = {
    vixl::aarch64::x0,
    vixl::aarch64::x1,
    vixl::aarch64::x2,
    vixl::aarch64::x3,
    vixl::aarch64::x4,
    vixl::aarch64::x5,
    vixl::aarch64::x6,
    vixl::aarch64::x7
};
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const vixl::aarch64::VRegister kRuntimeParameterFpuRegisters[] = {
    vixl::aarch64::d0,
    vixl::aarch64::d1,
    vixl::aarch64::d2,
    vixl::aarch64::d3,
    vixl::aarch64::d4,
    vixl::aarch64::d5,
    vixl::aarch64::d6,
    vixl::aarch64::d7
};
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                                vixl::aarch64::VRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                            vixl::aarch64::VRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFPRegisters,
                          kParameterFPRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type) const {
    return ARM64ReturnLocation(return_type);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
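
// Example (follows from the register arrays above, roughly): with the ArtMethod* in x0, the
// first core (integer/reference) arguments are assigned x1..x7 (accessed as w or x views
// depending on the type), FP arguments are assigned d0..d7 (or their s views), and any
// remaining arguments are passed on the stack.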

class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARM64() {}
  virtual ~InvokeDexCallingConventionVisitorARM64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type return_type) const override {
    return calling_convention.GetReturnLocation(return_type);
  }
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};

class CriticalNativeCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorARM64(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorARM64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // Register allocator does not support adjusting frame size, so we cannot provide final locations
  // of stack arguments for register allocation. We ask the register allocator for any location and
  // move these arguments to the right place after adjusting the SP when generating the call.
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t fpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorARM64);
};

class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARM64() {}

  Location GetObjectLocation() const override {
    return helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFieldIndexLocation() const override {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetReturnLocation([[maybe_unused]] DataType::Type type) const override {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetSetValueLocation([[maybe_unused]] DataType::Type type,
                               bool is_instance) const override {
    return is_instance
        ? helpers::LocationFrom(vixl::aarch64::x2)
        : helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFpuLocation([[maybe_unused]] DataType::Type type) const override {
    return helpers::LocationFrom(vixl::aarch64::d0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};

class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Arm64Assembler* GetAssembler() const { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  // SIMD helpers.
  virtual Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) = 0;
  virtual void FreeSIMDScratchLocation(Location loc,
                                       vixl::aarch64::UseScratchRegisterScope* scope) = 0;
  virtual void LoadSIMDRegFromStack(Location destination, Location source) = 0;
  virtual void MoveSIMDRegToSIMDReg(Location destination, Location source) = 0;
  virtual void MoveToSIMDStackSlot(Location destination, Location source) = 0;
  virtual void SaveLiveRegistersHelper(LocationSummary* locations,
                                       int64_t spill_offset) = 0;
  virtual void RestoreLiveRegistersHelper(LocationSummary* locations,
                                          int64_t spill_offset) = 0;

 protected:
  void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                        vixl::aarch64::Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
                                         vixl::aarch64::Register temp);
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* instr);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleCondition(HCondition* instruction);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a floating-point comparison.
  void GenerateFcmp(HInstruction* instruction);

  void HandleShift(HBinaryOperation* instr);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch64::Label* true_target,
                             vixl::aarch64::Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateIncrementNegativeByOne(vixl::aarch64::Register out,
                                      vixl::aarch64::Register in, bool use_cond_inc);
  void GenerateResultRemWithAnyConstant(vixl::aarch64::Register out,
                                        vixl::aarch64::Register dividend,
                                        vixl::aarch64::Register quotient,
                                        int64_t divisor,
                                        // This function may acquire a scratch register.
                                        vixl::aarch64::UseScratchRegisterScope* temps_scope);
  void GenerateInt64UnsignedDivRemWithAnyPositiveConstant(HBinaryOperation* instruction);
  void GenerateInt64DivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateInt32DivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction, int64_t divisor);
  void GenerateIntDiv(HDiv* instruction);
  void GenerateIntDivForConstDenom(HDiv *instruction);
  void GenerateIntDivForPower2Denom(HDiv *instruction);
  void GenerateIntRem(HRem* instruction);
  void GenerateIntRemForConstDenom(HRem *instruction);
  void GenerateIntRemForPower2Denom(HRem *instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenerateMethodEntryExitHook(HInstruction* instruction);

  // Helpers to set up locations for vector memory operations. Returns the memory operand and,
  // if used, sets the output parameter scratch to a temporary register used in this operand,
  // so that the client can release it right after the memory operand use.
  // Neon version.
  vixl::aarch64::MemOperand VecNEONAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch64::UseScratchRegisterScope* temps_scope,
      size_t size,
      bool is_string_char_at,
      /*out*/ vixl::aarch64::Register* scratch);
  // SVE version.
  vixl::aarch64::SVEMemOperand VecSVEAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch64::UseScratchRegisterScope* temps_scope,
      size_t size,
      bool is_string_char_at,
      /*out*/ vixl::aarch64::Register* scratch);

  Arm64Assembler* const assembler_;
  CodeGeneratorARM64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
};

class LocationsBuilderARM64 : public HGraphVisitor {
 public:
  LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 protected:
  void HandleBinaryOp(HBinaryOperation* instr);
  void HandleFieldSet(HInstruction* instruction);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleInvoke(HInvoke* instr);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* instr);

  CodeGeneratorARM64* const codegen_;
  InvokeDexCallingConventionVisitorARM64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};

class InstructionCodeGeneratorARM64Neon : public InstructionCodeGeneratorARM64 {
 public:
  InstructionCodeGeneratorARM64Neon(HGraph* graph, CodeGeneratorARM64* codegen) :
      InstructionCodeGeneratorARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) override;
  void FreeSIMDScratchLocation(Location loc,
                               vixl::aarch64::UseScratchRegisterScope* scope) override;
  void LoadSIMDRegFromStack(Location destination, Location source) override;
  void MoveSIMDRegToSIMDReg(Location destination, Location source) override;
  void MoveToSIMDStackSlot(Location destination, Location source) override;
  void SaveLiveRegistersHelper(LocationSummary* locations, int64_t spill_offset) override;
  void RestoreLiveRegistersHelper(LocationSummary* locations, int64_t spill_offset) override;
};

class LocationsBuilderARM64Neon : public LocationsBuilderARM64 {
 public:
  LocationsBuilderARM64Neon(HGraph* graph, CodeGeneratorARM64* codegen) :
      LocationsBuilderARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION
};

class InstructionCodeGeneratorARM64Sve : public InstructionCodeGeneratorARM64 {
 public:
  InstructionCodeGeneratorARM64Sve(HGraph* graph, CodeGeneratorARM64* codegen) :
      InstructionCodeGeneratorARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) override;
  void FreeSIMDScratchLocation(Location loc,
                               vixl::aarch64::UseScratchRegisterScope* scope) override;
  void LoadSIMDRegFromStack(Location destination, Location source) override;
  void MoveSIMDRegToSIMDReg(Location destination, Location source) override;
  void MoveToSIMDStackSlot(Location destination, Location source) override;
  void SaveLiveRegistersHelper(LocationSummary* locations, int64_t spill_offset) override;
  void RestoreLiveRegistersHelper(LocationSummary* locations, int64_t spill_offset) override;

 private:
  // Validate that instruction vector length and packed type are compliant with the SIMD
  // register size (full SIMD register is used).
  void ValidateVectorLength(HVecOperation* instr) const;

  vixl::aarch64::PRegister GetVecGoverningPReg(HVecOperation* instr) {
    return GetVecPredSetFixedOutPReg(instr->GetGoverningPredicate());
  }

  // Returns a fixed p-reg for predicate setting instruction.
  //
  // Currently we only support diamond CF loops for predicated vectorization; also we don't have
  // register allocator support for vector predicates. Thus we use fixed P-regs for loop main,
  // True and False predicates as a temporary solution.
  //
  // TODO: Support SIMD types and registers in ART.
  static vixl::aarch64::PRegister GetVecPredSetFixedOutPReg(HVecPredSetOperation* instr) {
    if (instr->IsVecPredWhile() || instr->IsVecPredSetAll()) {
      // VecPredWhile and VecPredSetAll live ranges never overlap due to the current vectorization
      // scheme: the former is only live inside a vectorized loop and the latter is never in a
      // loop and never spans across loops.
      return vixl::aarch64::p0;
    } else if (instr->IsVecPredNot()) {
      // This relies on the fact that we only use PredNot manually in the autovectorizer,
      // so there is only one of them in each loop.
      return vixl::aarch64::p1;
    } else {
      DCHECK(instr->IsVecCondition());
      return vixl::aarch64::p2;
    }
  }

  // Generate a vector comparison instruction based on the IfCondition.
  void GenerateIntegerVecComparison(const vixl::aarch64::PRegisterWithLaneSize& pd,
                                    const vixl::aarch64::PRegisterZ& pg,
                                    const vixl::aarch64::ZRegister& zn,
                                    const vixl::aarch64::ZRegister& zm,
                                    IfCondition cond);
  void HandleVecCondition(HVecCondition* instruction);
};

class LocationsBuilderARM64Sve : public LocationsBuilderARM64 {
 public:
  LocationsBuilderARM64Sve(HGraph* graph, CodeGeneratorARM64* codegen) :
      LocationsBuilderARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION
 private:
  void HandleVecCondition(HVecCondition* instruction);
};

class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
 public:
  ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
      : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}

 protected:
  void PrepareForEmitNativeCode() override;
  void FinishEmitNativeCode() override;
  Location AllocateScratchLocationFor(Location::Kind kind) override;
  void FreeScratchLocation(Location loc) override;
  void EmitMove(size_t index) override;

 private:
  Arm64Assembler* GetAssembler() const;
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
    return GetAssembler()->GetVIXLAssembler();
  }

  CodeGeneratorARM64* const codegen_;
  vixl::aarch64::UseScratchRegisterScope vixl_temps_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
};

class CodeGeneratorARM64 : public CodeGenerator {
 public:
  CodeGeneratorARM64(HGraph* graph,
                     const CompilerOptions& compiler_options,
                     OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARM64() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;

  static void PopFrameAndReturn(Arm64Assembler* assembler,
                                int32_t frame_size,
                                vixl::aarch64::CPURegList preserved_core_registers,
                                vixl::aarch64::CPURegList preserved_fp_registers);

  vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
  vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;

  void Bind(HBasicBlock* block) override;

  vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  size_t GetWordSize() const override {
    return kArm64WordSize;
  }

  bool SupportsPredicatedSIMD() const override { return ShouldUseSVE(); }

  size_t GetSlowPathFPWidth() const override {
    return GetGraph()->HasSIMD()
        ? GetSIMDRegisterWidth()
        : vixl::aarch64::kDRegSizeInBytes;
  }

  size_t GetCalleePreservedFPWidth() const override {
    return vixl::aarch64::kDRegSizeInBytes;
  }

  size_t GetSIMDRegisterWidth() const override;

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  HGraphVisitor* GetLocationBuilder() override { return location_builder_; }
  InstructionCodeGeneratorARM64* GetInstructionCodeGeneratorArm64() {
    return instruction_visitor_;
  }
  HGraphVisitor* GetInstructionVisitor() override { return GetInstructionCodeGeneratorArm64(); }
  Arm64Assembler* GetAssembler() override { return &assembler_; }
  const Arm64Assembler& GetAssembler() const override { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  // Emit a write barrier if:
  // A) emit_null_check is false, or
  // B) emit_null_check is true and value is not null.
  void MaybeMarkGCCard(vixl::aarch64::Register object,
                       vixl::aarch64::Register value,
                       bool emit_null_check);

  // Emit a write barrier unconditionally.
  void MarkGCCard(vixl::aarch64::Register object);

  // Crash if the card table is not valid. This check is only emitted for the CC GC. We assert
  // `(!clean || !self->is_gc_marking)`, since the card table should not be set to clean when the CC
  // GC is marking for eliminated write barriers.
  void CheckGCCardIsValid(vixl::aarch64::Register object);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  // Register allocation.

  void SetupBlockedRegisters() const override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  // The number of registers that can be allocated. The register allocator may
  // decide to reserve and not use a few of them.
  // We do not consider registers sp, xzr, wzr. They are either not allocatable
  // (xzr, wzr), or make for poor allocatable registers (sp alignment
  // requirements, etc.). This also facilitates our task as all other registers
  // can easily be mapped to or from their type and index or code.
  static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfVRegisters;
  static constexpr int kNumberOfAllocatableRegisterPairs = 0;
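
  // Note: assuming VIXL's kNumberOfRegisters is 32, this yields 31 allocatable core registers
  // (x0..x30); encoding 31 (sp/xzr/wzr) is the one excluded above.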

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  InstructionSet GetInstructionSet() const override {
    return InstructionSet::kArm64;
  }

  const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;

  void Initialize() override {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
  // These instructions can only encode offsets that are multiples of the register size accessed.
  uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }

  JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
    ArenaAllocator* allocator = GetGraph()->GetAllocator();
    jump_tables_.emplace_back(new (allocator) JumpTableARM64(switch_instr, allocator));
    return jump_tables_.back().get();
  }
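
  // A rough sketch of how a jump table is typically consumed when lowering HPackedSwitch
  // (register names below are illustrative, not the exact emitted sequence):
  //   __ Adr(table_base, jump_table->GetTableStartLabel());
  //   __ Ldr(entry, MemOperand(table_base, index, vixl::aarch64::UXTW, 2));
  //   __ Add(target, table_base, Operand(entry, vixl::aarch64::SXTW));
  //   __ Br(target);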
770 
771   void Finalize() override;
772 
773   // Code generation helpers.
774   void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
775   void MoveConstant(Location destination, int32_t value) override;
776   void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
777   void AddLocationAsTemp(Location location, LocationSummary* locations) override;
778 
779   void Load(DataType::Type type,
780             vixl::aarch64::CPURegister dst,
781             const vixl::aarch64::MemOperand& src);
782   void Store(DataType::Type type,
783              vixl::aarch64::CPURegister src,
784              const vixl::aarch64::MemOperand& dst);
785   void LoadAcquire(HInstruction* instruction,
786                    DataType::Type type,
787                    vixl::aarch64::CPURegister dst,
788                    const vixl::aarch64::MemOperand& src,
789                    bool needs_null_check);
790   void StoreRelease(HInstruction* instruction,
791                     DataType::Type type,
792                     vixl::aarch64::CPURegister src,
793                     const vixl::aarch64::MemOperand& dst,
794                     bool needs_null_check);
795 
796   // Generate code to invoke a runtime entry point.
797   void InvokeRuntime(QuickEntrypointEnum entrypoint,
798                      HInstruction* instruction,
799                      SlowPathCode* slow_path = nullptr) override;
800 
801   // Generate code to invoke a runtime entry point, but do not record
802   // PC-related information in a stack map.
803   void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
804                                            HInstruction* instruction,
805                                            SlowPathCode* slow_path);
806 
GetMoveResolver()807   ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
808 
NeedsTwoRegisters(DataType::Type type)809   bool NeedsTwoRegisters([[maybe_unused]] DataType::Type type) const override { return false; }
810 
811   // Check if the desired_string_load_kind is supported. If it is, return it,
812   // otherwise return a fall-back kind that should be used instead.
813   HLoadString::LoadKind GetSupportedLoadStringKind(
814       HLoadString::LoadKind desired_string_load_kind) override;
815 
816   // Check if the desired_class_load_kind is supported. If it is, return it,
817   // otherwise return a fall-back kind that should be used instead.
818   HLoadClass::LoadKind GetSupportedLoadClassKind(
819       HLoadClass::LoadKind desired_class_load_kind) override;
820 
821   // Check if the desired_dispatch_info is supported. If it is, return it,
822   // otherwise return a fall-back info that should be used instead.
823   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
824       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
825       ArtMethod* method) override;
826 
827   void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
828   void GenerateStaticOrDirectCall(
829       HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
830   void GenerateVirtualCall(
831       HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
832 
833   void MoveFromReturnRegister(Location trg, DataType::Type type) override;
834 
835   // Add a new boot image intrinsic patch for an instruction and return the label
836   // to be bound before the instruction. The instruction will be either the
837   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
838   // to the associated ADRP patch label).
839   vixl::aarch64::Label* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
840                                                    vixl::aarch64::Label* adrp_label = nullptr);
841 
842   // Add a new boot image relocation patch for an instruction and return the label
843   // to be bound before the instruction. The instruction will be either the
844   // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
845   // to the associated ADRP patch label).
846   vixl::aarch64::Label* NewBootImageRelRoPatch(uint32_t boot_image_offset,
847                                                vixl::aarch64::Label* adrp_label = nullptr);
848 
849   // Add a new boot image method patch for an instruction and return the label
850   // to be bound before the instruction. The instruction will be either the
851   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
852   // to the associated ADRP patch label).
853   vixl::aarch64::Label* NewBootImageMethodPatch(MethodReference target_method,
854                                                 vixl::aarch64::Label* adrp_label = nullptr);
855 
856   // Add a new app image method patch for an instruction and return the label
857   // to be bound before the instruction. The instruction will be either the
858   // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
859   // to the associated ADRP patch label).
860   vixl::aarch64::Label* NewAppImageMethodPatch(MethodReference target_method,
861                                                vixl::aarch64::Label* adrp_label = nullptr);
862 
863   // Add a new .bss entry method patch for an instruction and return
864   // the label to be bound before the instruction. The instruction will be
865   // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
866   // pointing to the associated ADRP patch label).
867   vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method,
868                                                vixl::aarch64::Label* adrp_label = nullptr);
869 
870   // Add a new boot image type patch for an instruction and return the label
871   // to be bound before the instruction. The instruction will be either the
872   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
873   // to the associated ADRP patch label).
874   vixl::aarch64::Label* NewBootImageTypePatch(const DexFile& dex_file,
875                                               dex::TypeIndex type_index,
876                                               vixl::aarch64::Label* adrp_label = nullptr);
877 
878   // Add a new app image type patch for an instruction and return the label
879   // to be bound before the instruction. The instruction will be either the
880   // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
881   // to the associated ADRP patch label).
882   vixl::aarch64::Label* NewAppImageTypePatch(const DexFile& dex_file,
883                                              dex::TypeIndex type_index,
884                                              vixl::aarch64::Label* adrp_label = nullptr);
885 
886   // Add a new .bss entry type patch for an instruction and return the label
887   // to be bound before the instruction. The instruction will be either the
888   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
889   // to the associated ADRP patch label).
890   vixl::aarch64::Label* NewBssEntryTypePatch(HLoadClass* load_class,
891                                              vixl::aarch64::Label* adrp_label = nullptr);
892 
893   // Add a new boot image string patch for an instruction and return the label
894   // to be bound before the instruction. The instruction will be either the
895   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
896   // to the associated ADRP patch label).
897   vixl::aarch64::Label* NewBootImageStringPatch(const DexFile& dex_file,
898                                                 dex::StringIndex string_index,
899                                                 vixl::aarch64::Label* adrp_label = nullptr);
900 
901   // Add a new .bss entry string patch for an instruction and return the label
902   // to be bound before the instruction. The instruction will be either the
903   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
904   // to the associated ADRP patch label).
905   vixl::aarch64::Label* NewStringBssEntryPatch(const DexFile& dex_file,
906                                                dex::StringIndex string_index,
907                                                vixl::aarch64::Label* adrp_label = nullptr);
908 
909   // Add a new .bss entry MethodType patch for an instruction and return the label
910   // to be bound before the instruction. The instruction will be either the
911   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
912   // to the associated ADRP patch label).
913   vixl::aarch64::Label* NewMethodTypeBssEntryPatch(HLoadMethodType* load_method_type,
914                                                    vixl::aarch64::Label* adrp_label = nullptr);
915 
916   // Add a new boot image JNI entrypoint patch for an instruction and return the label
917   // to be bound before the instruction. The instruction will be either the
918   // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
919   // to the associated ADRP patch label).
920   vixl::aarch64::Label* NewBootImageJniEntrypointPatch(MethodReference target_method,
921                                                        vixl::aarch64::Label* adrp_label = nullptr);
922 
923   // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
924   void EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset);
925 
926   // Emit the CBNZ instruction for baker read barrier and record
927   // the associated patch for AOT or slow path for JIT.
928   void EmitBakerReadBarrierCbnz(uint32_t custom_data);
929 
DeduplicateBootImageAddressLiteral(uint64_t address)930   vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address) {
931     return jit_patches_.DeduplicateBootImageAddressLiteral(address);
932   }
DeduplicateJitStringLiteral(const DexFile & dex_file,dex::StringIndex string_index,Handle<mirror::String> handle)933   vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
934                                                                 dex::StringIndex string_index,
935                                                                 Handle<mirror::String> handle) {
936     return jit_patches_.DeduplicateJitStringLiteral(
937         dex_file, string_index, handle, GetCodeGenerationData());
938   }
DeduplicateJitClassLiteral(const DexFile & dex_file,dex::TypeIndex class_index,Handle<mirror::Class> handle)939   vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
940                                                                dex::TypeIndex class_index,
941                                                                Handle<mirror::Class> handle) {
942     return jit_patches_.DeduplicateJitClassLiteral(
943         dex_file, class_index, handle, GetCodeGenerationData());
944   }
DeduplicateJitMethodTypeLiteral(const DexFile & dex_file,dex::ProtoIndex proto_index,Handle<mirror::MethodType> handle)945   vixl::aarch64::Literal<uint32_t>* DeduplicateJitMethodTypeLiteral(
946       const DexFile& dex_file,
947       dex::ProtoIndex proto_index,
948       Handle<mirror::MethodType> handle) {
949     return jit_patches_.DeduplicateJitMethodTypeLiteral(
950         dex_file, proto_index, handle, GetCodeGenerationData());
951   }
952 
953   void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
954   void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
955                           vixl::aarch64::Register out,
956                           vixl::aarch64::Register base);
957   void EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label,
958                                 vixl::aarch64::Register out,
959                                 vixl::aarch64::Register base);
960 
961   void LoadBootImageRelRoEntry(vixl::aarch64::Register reg, uint32_t boot_image_offset);
962   void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
963   void LoadTypeForBootImageIntrinsic(vixl::aarch64::Register reg, TypeReference type_reference);
964   void LoadIntrinsicDeclaringClass(vixl::aarch64::Register reg, HInvoke* invoke);
965   void LoadClassRootForIntrinsic(vixl::aarch64::Register reg, ClassRoot class_root);
966 
967   void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
968   bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
969   void EmitThunkCode(const linker::LinkerPatch& patch,
970                      /*out*/ ArenaVector<uint8_t>* code,
971                      /*out*/ std::string* debug_name) override;
972 
973   void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
974 
975   // Generate a GC root reference load:
976   //
977   //   root <- *(obj + offset)
978   //
979   // while honoring read barriers based on read_barrier_option.
980   void GenerateGcRootFieldLoad(HInstruction* instruction,
981                                Location root,
982                                vixl::aarch64::Register obj,
983                                uint32_t offset,
984                                vixl::aarch64::Label* fixup_label,
985                                ReadBarrierOption read_barrier_option);
986   // Generate MOV for the `old_value` in intrinsic and mark it with Baker read barrier.
987   void GenerateIntrinsicMoveWithBakerReadBarrier(vixl::aarch64::Register marked_old_value,
988                                                  vixl::aarch64::Register old_value);
989   // Fast path implementation of ReadBarrier::Barrier for a heap
990   // reference field load when Baker's read barriers are used.
991   // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
992   void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
993                                              Location ref,
994                                              vixl::aarch64::Register obj,
995                                              const vixl::aarch64::MemOperand& src,
996                                              bool needs_null_check,
997                                              bool use_load_acquire);
998   // Fast path implementation of ReadBarrier::Barrier for a heap
999   // reference field load when Baker's read barriers are used.
1000   void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
1001                                              Location ref,
1002                                              vixl::aarch64::Register obj,
1003                                              uint32_t offset,
1004                                              Location maybe_temp,
1005                                              bool needs_null_check,
1006                                              bool use_load_acquire);
1007   // Fast path implementation of ReadBarrier::Barrier for a heap
1008   // reference array load when Baker's read barriers are used.
1009   void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
1010                                              Location ref,
1011                                              vixl::aarch64::Register obj,
1012                                              uint32_t data_offset,
1013                                              Location index,
1014                                              bool needs_null_check);
1015 
1016   // Emit code checking the status of the Marking Register, and
1017   // aborting the program if MR does not match the value stored in the
1018   // art::Thread object. Code is only emitted in debug mode and if
1019   // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
1020   //
1021   // Argument `code` is used to identify the different occurrences of
1022   // MaybeGenerateMarkingRegisterCheck in the code generator, and is
1023   // passed to the BRK instruction.
1024   //
1025   // If `temp_loc` is a valid location, it is expected to be a
1026   // register and will be used as a temporary to generate code;
1027   // otherwise, a temporary will be fetched from the core register
1028   // scratch pool.
1029   virtual void MaybeGenerateMarkingRegisterCheck(int code,
1030                                                  Location temp_loc = Location::NoLocation());
1031 
1032   // Create slow path for a read barrier for a heap reference within `instruction`.
1033   //
1034   // This is a helper function for GenerateReadBarrierSlow() that has the same
1035   // arguments. The creation and adding of the slow path is exposed for intrinsics
1036   // that cannot use GenerateReadBarrierSlow() from their own slow paths.
1037   SlowPathCodeARM64* AddReadBarrierSlowPath(HInstruction* instruction,
1038                                             Location out,
1039                                             Location ref,
1040                                             Location obj,
1041                                             uint32_t offset,
1042                                             Location index);
1043 
1044   // Generate a read barrier for a heap reference within `instruction`
1045   // using a slow path.
1046   //
1047   // A read barrier for an object reference read from the heap is
1048   // implemented as a call to the artReadBarrierSlow runtime entry
1049   // point, which is passed the values in locations `ref`, `obj`, and
1050   // `offset`:
1051   //
1052   //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
1053   //                                      mirror::Object* obj,
1054   //                                      uint32_t offset);
1055   //
1056   // The `out` location contains the value returned by
1057   // artReadBarrierSlow.
1058   //
1059   // When `index` is provided (i.e. for array accesses), the offset
1060   // value passed to artReadBarrierSlow is adjusted to take `index`
1061   // into account.
1062   void GenerateReadBarrierSlow(HInstruction* instruction,
1063                                Location out,
1064                                Location ref,
1065                                Location obj,
1066                                uint32_t offset,
1067                                Location index = Location::NoLocation());
1068 
1069   // If read barriers are enabled, generate a read barrier for a heap
1070   // reference using a slow path. If heap poisoning is enabled, also
1071   // unpoison the reference in `out`.
1072   void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
1073                                     Location out,
1074                                     Location ref,
1075                                     Location obj,
1076                                     uint32_t offset,
1077                                     Location index = Location::NoLocation());
1078 
1079   // Generate a read barrier for a GC root within `instruction` using
1080   // a slow path.
1081   //
1082   // A read barrier for an object reference GC root is implemented as
1083   // a call to the artReadBarrierForRootSlow runtime entry point,
1084   // which is passed the value in location `root`:
1085   //
1086   //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
1087   //
1088   // The `out` location contains the value returned by
1089   // artReadBarrierForRootSlow.
1090   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
1091 
1092   void IncreaseFrame(size_t adjustment) override;
1093   void DecreaseFrame(size_t adjustment) override;
1094 
1095   void GenerateNop() override;
1096 
1097   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
1098   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
1099 
1100   void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
1101     // The function must only be called within special scopes
1102     // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
1103     // veneer/literal pools by the VIXL assembler.
1104     CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
1105         << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
1106     CodeGenerator::MaybeRecordImplicitNullCheck(instr);
1107   }
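  // Illustrative call sequence (a sketch; `dst`, `base` and `offset` are
  // placeholder names, not part of this interface):
  //   {
  //     EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
  //     GetVIXLAssembler()->Ldr(dst, vixl::aarch64::MemOperand(base, offset));
  //     MaybeRecordImplicitNullCheck(instruction);
  //   }
  // Blocking pool emission ensures the recorded PC corresponds to the faulting
  // load rather than to an interleaved veneer or literal pool.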
1108 
1109   void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl::aarch64::Register klass);
1110   void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
1111   void MaybeRecordTraceEvent(bool is_method_entry);
1112 
1113   bool CanUseImplicitSuspendCheck() const;
1114 
1115  private:
1116   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
1117 
1118   enum class BakerReadBarrierKind : uint8_t {
1119     kField,     // Field get or array get with constant offset (i.e. constant index).
1120     kAcquire,   // Volatile field get.
1121     kArray,     // Array get with index in register.
1122     kGcRoot,    // GC root load.
1123     kLast = kGcRoot
1124   };
1125 
1126   static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* sp/zr is invalid */ 31u;
1127 
1128   static constexpr size_t kBitsForBakerReadBarrierKind =
1129       MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
1130   static constexpr size_t kBakerReadBarrierBitsForRegister =
1131       MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
1132   using BakerReadBarrierKindField =
1133       BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
1134   using BakerReadBarrierFirstRegField =
1135       BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
1136   using BakerReadBarrierSecondRegField =
1137       BitField<uint32_t,
1138                kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
1139                kBakerReadBarrierBitsForRegister>;
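  // With the widths above (2 bits for the kind, 5 bits per register), the
  // encoded thunk data is expected to be laid out as:
  //   bits [0, 2)  : BakerReadBarrierKind
  //   bits [2, 7)  : first register (base or root)
  //   bits [7, 12) : second register (holder), or 31 when unused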
1140 
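  // The encoded register must be below lr (x30) and must not be ip0 (x16) or
  // ip1 (x17), which are reserved as scratch registers.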
1141   static void CheckValidReg(uint32_t reg) {
1142     DCHECK(reg < vixl::aarch64::lr.GetCode() &&
1143            reg != vixl::aarch64::ip0.GetCode() &&
1144            reg != vixl::aarch64::ip1.GetCode()) << reg;
1145   }
1146 
1147   static inline uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) {
1148     CheckValidReg(base_reg);
1149     CheckValidReg(holder_reg);
1150     return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
1151            BakerReadBarrierFirstRegField::Encode(base_reg) |
1152            BakerReadBarrierSecondRegField::Encode(holder_reg);
1153   }
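  // Worked example (derived from the bit fields above; the register numbers
  // are arbitrary and kField encodes to 0 as the first enumerator):
  //   EncodeBakerReadBarrierFieldData(/* base_reg= */ 1, /* holder_reg= */ 2)
  //       == 0u | (1u << 2) | (2u << 7) == 0x104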
1154 
1155   static inline uint32_t EncodeBakerReadBarrierAcquireData(uint32_t base_reg, uint32_t holder_reg) {
1156     CheckValidReg(base_reg);
1157     CheckValidReg(holder_reg);
1158     DCHECK_NE(base_reg, holder_reg);
1159     return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kAcquire) |
1160            BakerReadBarrierFirstRegField::Encode(base_reg) |
1161            BakerReadBarrierSecondRegField::Encode(holder_reg);
1162   }
1163 
1164   static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
1165     CheckValidReg(base_reg);
1166     return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
1167            BakerReadBarrierFirstRegField::Encode(base_reg) |
1168            BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
1169   }
1170 
1171   static inline uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
1172     CheckValidReg(root_reg);
1173     return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
1174            BakerReadBarrierFirstRegField::Encode(root_reg) |
1175            BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
1176   }
1177 
1178   void CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
1179                                     uint32_t encoded_data,
1180                                     /*out*/ std::string* debug_name);
1181 
1182   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
1183   // whether through .data.img.rel.ro, .bss, or directly in the boot image.
1184   struct PcRelativePatchInfo : PatchInfo<vixl::aarch64::Label> {
1185     PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx)
1186         : PatchInfo<vixl::aarch64::Label>(dex_file, off_or_idx), pc_insn_label() { }
1187 
1188     vixl::aarch64::Label* pc_insn_label;
1189   };
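  // Typical code shape for such a patch (a sketch; the exact emission helpers
  // and registers vary per use):
  //   adrp x0, <placeholder>        // label of this instruction -> pc_insn_label
  //   add  x0, x0, #<placeholder>   // or: ldr w0, [x0, #<placeholder>]
  // The linker later fills in both placeholders relative to the ADRP's PC.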
1190 
1191   struct BakerReadBarrierPatchInfo {
1192     explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }
1193 
1194     vixl::aarch64::Label label;
1195     uint32_t custom_data;
1196   };
1197 
1198   vixl::aarch64::Label* NewPcRelativePatch(const DexFile* dex_file,
1199                                            uint32_t offset_or_index,
1200                                            vixl::aarch64::Label* adrp_label,
1201                                            ArenaDeque<PcRelativePatchInfo>* patches);
1202 
1203   void FixJumpTables();
1204 
1205   template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
1206   static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
1207                                           ArenaVector<linker::LinkerPatch>* linker_patches);
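  // Expected usage sketch (the factory shown is one plausible choice):
  //   EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
  //       boot_image_method_patches_, linker_patches);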
1208 
1209   // Returns whether SVE features are supported and should be used.
1210   bool ShouldUseSVE() const;
1211 
1212   // Labels for each block that will be compiled.
1213   // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
1214   ArenaDeque<vixl::aarch64::Label> block_labels_;  // Indexed by block id.
1215   vixl::aarch64::Label frame_entry_label_;
1216   ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
1217 
1218   LocationsBuilderARM64Neon location_builder_neon_;
1219   InstructionCodeGeneratorARM64Neon instruction_visitor_neon_;
1220   LocationsBuilderARM64Sve location_builder_sve_;
1221   InstructionCodeGeneratorARM64Sve instruction_visitor_sve_;
1222 
1223   LocationsBuilderARM64* location_builder_;
1224   InstructionCodeGeneratorARM64* instruction_visitor_;
1225   ParallelMoveResolverARM64 move_resolver_;
1226   Arm64Assembler assembler_;
1227 
1228   // PC-relative method patch info for kBootImageLinkTimePcRelative.
1229   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
1230   // PC-relative method patch info for kAppImageRelRo.
1231   ArenaDeque<PcRelativePatchInfo> app_image_method_patches_;
1232   // PC-relative method patch info for kBssEntry.
1233   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
1234   // PC-relative type patch info for kBootImageLinkTimePcRelative.
1235   ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
1236   // PC-relative type patch info for kAppImageRelRo.
1237   ArenaDeque<PcRelativePatchInfo> app_image_type_patches_;
1238   // PC-relative type patch info for kBssEntry.
1239   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
1240   // PC-relative public type patch info for kBssEntryPublic.
1241   ArenaDeque<PcRelativePatchInfo> public_type_bss_entry_patches_;
1242   // PC-relative package type patch info for kBssEntryPackage.
1243   ArenaDeque<PcRelativePatchInfo> package_type_bss_entry_patches_;
1244   // PC-relative String patch info for kBootImageLinkTimePcRelative.
1245   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
1246   // PC-relative String patch info for kBssEntry.
1247   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
1248   // PC-relative MethodType patch info for kBssEntry.
1249   ArenaDeque<PcRelativePatchInfo> method_type_bss_entry_patches_;
1250   // PC-relative method patch info for kBootImageLinkTimePcRelative+kCallCriticalNative.
1251   ArenaDeque<PcRelativePatchInfo> boot_image_jni_entrypoint_patches_;
1252   // PC-relative patch info for IntrinsicObjects in the boot image, and for
1253   // kBootImageRelRo method/type/string patches otherwise.
1254   ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
1255   // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
1256   ArenaDeque<PatchInfo<vixl::aarch64::Label>> call_entrypoint_patches_;
1257   // Baker read barrier patch info.
1258   ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
1259 
1260   JitPatchesARM64 jit_patches_;
1261 
1262   // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
1263   // Wrap the label to work around vixl::aarch64::Label being non-copyable
1264   // and non-moveable and as such unusable in ArenaSafeMap<>.
1265   struct LabelWrapper {
1266     LabelWrapper(const LabelWrapper& src)
1267         : label() {
1268       DCHECK(!src.label.IsLinked() && !src.label.IsBound());
1269     }
1270     LabelWrapper() = default;
1271     vixl::aarch64::Label label;
1272   };
1273   ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
1274 
1275   friend class linker::Arm64RelativePatcherTest;
1276   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
1277 };
1278 
1279 inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const {
1280   return codegen_->GetAssembler();
1281 }
1282 
1283 }  // namespace arm64
1284 }  // namespace art
1285 
1286 #endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
1287