/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_

#include "android-base/logging.h"
#include "arch/riscv64/registers_riscv64.h"
#include "base/macros.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "intrinsics_list.h"
#include "optimizing/locations.h"
#include "parallel_move_resolver.h"
#include "utils/riscv64/assembler_riscv64.h"

namespace art HIDDEN {
namespace riscv64 {

// InvokeDexCallingConvention registers
static constexpr XRegister kParameterCoreRegisters[] = {A1, A2, A3, A4, A5, A6, A7};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

static constexpr FRegister kParameterFpuRegisters[] = {FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7};
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);

// InvokeRuntimeCallingConvention registers
static constexpr XRegister kRuntimeParameterCoreRegisters[] = {A0, A1, A2, A3, A4, A5, A6, A7};
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

static constexpr FRegister kRuntimeParameterFpuRegisters[] = {
    FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7
};
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

// FCLASS returns a 10-bit classification mask with the two highest bits marking NaNs
// (signaling and quiet). To detect a NaN, compare the result with `kFClassNaNMinValue`
// using either BGE or BGEU; the sign bit of the result is always clear.
static_assert(kSignalingNaN == 0x100);
static_assert(kQuietNaN == 0x200);
static constexpr int32_t kFClassNaNMinValue = 0x100;
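
// For example, a NaN check on the FCLASS result reduces to a single comparison. A
// minimal sketch (`result` and `IsNaNClass` are illustrative, not part of this header):
//
//   bool IsNaNClass(int32_t result) {
//     // The NaN bits (0x100 and 0x200) are the only classification bits >= 0x100.
//     return result >= kFClassNaNMinValue;
//   }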

#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) \
  V(FP16Ceil)                                   \
  V(FP16Compare)                                \
  V(FP16Floor)                                  \
  V(FP16Rint)                                   \
  V(FP16ToFloat)                                \
  V(FP16ToHalf)                                 \
  V(FP16Greater)                                \
  V(FP16GreaterEquals)                          \
  V(FP16Less)                                   \
  V(FP16LessEquals)                             \
  V(FP16Min)                                    \
  V(FP16Max)                                    \
  V(StringStringIndexOf)                        \
  V(StringStringIndexOfAfter)                   \
  V(StringBufferAppend)                         \
  V(StringBufferLength)                         \
  V(StringBufferToString)                       \
  V(StringBuilderAppendObject)                  \
  V(StringBuilderAppendString)                  \
  V(StringBuilderAppendCharSequence)            \
  V(StringBuilderAppendCharArray)               \
  V(StringBuilderAppendBoolean)                 \
  V(StringBuilderAppendChar)                    \
  V(StringBuilderAppendInt)                     \
  V(StringBuilderAppendLong)                    \
  V(StringBuilderAppendFloat)                   \
  V(StringBuilderAppendDouble)                  \
  V(StringBuilderLength)                        \
  V(StringBuilderToString)                      \
  V(CRC32Update)                                \
  V(CRC32UpdateBytes)                           \
  V(CRC32UpdateByteBuffer)                      \
  V(MethodHandleInvoke)                         \
  V(UnsafeArrayBaseOffset)                      \
  V(JdkUnsafeArrayBaseOffset)                   \

// Method register on invoke.
static const XRegister kArtMethodRegister = A0;

// Helper functions used by codegen as well as intrinsics.
XRegister InputXRegisterOrZero(Location location);
int32_t ReadBarrierMarkEntrypointOffset(Location ref);

class CodeGeneratorRISCV64;

class InvokeRuntimeCallingConvention : public CallingConvention<XRegister, FRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kRiscv64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<XRegister, FRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kRiscv64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorRISCV64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorRISCV64() {}
  virtual ~InvokeDexCallingConventionVisitorRISCV64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorRISCV64);
};

class CriticalNativeCallingConventionVisitorRiscv64 : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorRiscv64(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorRiscv64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // The register allocator does not support adjusting the frame size, so we cannot provide
  // final locations for stack arguments during register allocation. Instead, we ask the
  // register allocator for any location and move these arguments to the right place after
  // adjusting the SP when generating the call. (See the sketch after this class.)
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t fpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorRiscv64);
};
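
// A minimal sketch of the two-pass protocol described in the class above (the variable
// names are illustrative, not part of this header):
//
//   // Pass 1, register allocation: locations of stack arguments are not final.
//   CriticalNativeCallingConventionVisitorRiscv64 for_ra(/*for_register_allocation=*/ true);
//   // Pass 2, code generation: after adjusting SP by for_codegen.GetStackOffset(),
//   // stack arguments are moved to their final slots.
//   CriticalNativeCallingConventionVisitorRiscv64 for_codegen(/*for_register_allocation=*/ false);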

class SlowPathCodeRISCV64 : public SlowPathCode {
 public:
  explicit SlowPathCodeRISCV64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  Riscv64Label* GetEntryLabel() { return &entry_label_; }
  Riscv64Label* GetExitLabel() { return &exit_label_; }

 private:
  Riscv64Label entry_label_;
  Riscv64Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeRISCV64);
};

class ParallelMoveResolverRISCV64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverRISCV64(ArenaAllocator* allocator, CodeGeneratorRISCV64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  void Exchange(int index1, int index2, bool double_slot);

  Riscv64Assembler* GetAssembler() const;

 private:
  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverRISCV64);
};

class FieldAccessCallingConventionRISCV64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionRISCV64() {}

  Location GetObjectLocation() const override {
    return Location::RegisterLocation(A1);
  }
  Location GetFieldIndexLocation() const override {
    return Location::RegisterLocation(A0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::RegisterLocation(A0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const override {
    return is_instance
        ? Location::RegisterLocation(A2)
        : Location::RegisterLocation(A1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::FpuRegisterLocation(FA0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionRISCV64);
};

class LocationsBuilderRISCV64 : public HGraphVisitor {
 public:
  LocationsBuilderRISCV64(HGraph* graph, CodeGeneratorRISCV64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id "
               << instruction->GetId() << ")";
  }

 protected:
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction);
  void HandleFieldGet(HInstruction* instruction);

  InvokeDexCallingConventionVisitorRISCV64 parameter_visitor_;

  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderRISCV64);
};

class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorRISCV64(HGraph* graph, CodeGeneratorRISCV64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id "
               << instruction->GetId() << ")";
  }

  Riscv64Assembler* GetAssembler() const { return assembler_; }

  void GenerateMemoryBarrier(MemBarrierKind kind);

  void FAdd(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FClass(XRegister rd, FRegister rs1, DataType::Type type);

  void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
  void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);

  // Sequentially consistent store. Used for volatile fields and intrinsics.
  // The `instruction` argument is for recording an implicit null check stack map with the
  // store instruction, which may not be the last instruction emitted by `StoreSeqCst()`.
  void StoreSeqCst(Location value,
                   XRegister rs1,
                   int32_t offset,
                   DataType::Type type,
                   HInstruction* instruction = nullptr);
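
  // A minimal usage sketch from within this visitor (the operands are illustrative;
  // real call sites also handle write barriers and heap poisoning):
  //   StoreSeqCst(locations->InAt(1),        // value to store
  //               obj_reg,                   // object holding the volatile field
  //               field_offset,              // field offset within the object
  //               DataType::Type::kInt32,
  //               instruction);              // record the implicit null check here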

  void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);

 protected:
  void GenerateClassInitializationCheck(SlowPathCodeRISCV64* slow_path, XRegister class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, XRegister temp);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Riscv64Label* true_target,
                             Riscv64Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void GenerateIntLongCondition(IfCondition cond, LocationSummary* locations);
  void GenerateIntLongCondition(IfCondition cond,
                                LocationSummary* locations,
                                XRegister rd,
                                bool to_all_bits);
  void GenerateIntLongCompareAndBranch(IfCondition cond,
                                       LocationSummary* locations,
                                       Riscv64Label* label);
  void GenerateFpCondition(IfCondition cond,
                           bool gt_bias,
                           DataType::Type type,
                           LocationSummary* locations,
                           Riscv64Label* label = nullptr);
  void GenerateFpCondition(IfCondition cond,
                           bool gt_bias,
                           DataType::Type type,
                           LocationSummary* locations,
                           Riscv64Label* label,
                           XRegister rd,
                           bool to_all_bits);
  void GenerateMethodEntryExitHook(HInstruction* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(XRegister adjusted,
                                   XRegister temp,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block);
  void GenTableBasedPackedSwitch(XRegister adjusted,
                                 XRegister temp,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block);
  int32_t VecAddress(LocationSummary* locations,
                     size_t size,
                     /*out*/ XRegister* adjusted_base);

  template <typename Reg,
            void (Riscv64Assembler::*opS)(Reg, FRegister, FRegister),
            void (Riscv64Assembler::*opD)(Reg, FRegister, FRegister)>
  void FpBinOp(Reg rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FSub(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FDiv(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMul(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMin(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMax(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FEq(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FLt(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FLe(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);

  template <typename Reg,
            void (Riscv64Assembler::*opS)(Reg, FRegister),
            void (Riscv64Assembler::*opD)(Reg, FRegister)>
  void FpUnOp(Reg rd, FRegister rs1, DataType::Type type);
  void FAbs(FRegister rd, FRegister rs1, DataType::Type type);
  void FNeg(FRegister rd, FRegister rs1, DataType::Type type);
  void FMv(FRegister rd, FRegister rs1, DataType::Type type);
  void FMvX(XRegister rd, FRegister rs1, DataType::Type type);

  Riscv64Assembler* const assembler_;
  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorRISCV64);
};

class CodeGeneratorRISCV64 : public CodeGenerator {
 public:
  CodeGeneratorRISCV64(HGraph* graph,
                       const CompilerOptions& compiler_options,
                       OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorRISCV64() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;

  void Bind(HBasicBlock* block) override;

  size_t GetWordSize() const override {
    // The "word" for the compiler is the core register size (64-bit for riscv64) while the
    // riscv64 assembler uses "word" for 32-bit values and "double word" for 64-bit values.
    return kRiscv64DoublewordSize;
  }

  bool SupportsPredicatedSIMD() const override {
    // TODO(riscv64): Check the vector extension.
    return false;
  }

  // Get FP register width in bytes for spilling/restoring in the slow paths.
  //
  // Note: In SIMD graphs this should return SIMD register width as all FP and SIMD registers
  // alias and live SIMD registers are forced to be spilled in full size in the slow paths.
  size_t GetSlowPathFPWidth() const override {
    // Default implementation.
    return GetCalleePreservedFPWidth();
  }

  size_t GetCalleePreservedFPWidth() const override {
    return kRiscv64FloatRegSizeInBytes;
  }

  size_t GetSIMDRegisterWidth() const override {
    // TODO(riscv64): Implement SIMD with the Vector extension.
    // Note: HLoopOptimization calls this function even for an ISA without SIMD support.
    return kRiscv64FloatRegSizeInBytes;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  }

  Riscv64Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Riscv64Label>(block_labels_, block);
  }

  void Initialize() override { block_labels_ = CommonInitializeLabels<Riscv64Label>(); }

  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location destination, Location source, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  Riscv64Assembler* GetAssembler() override { return &assembler_; }
  const Riscv64Assembler& GetAssembler() const override { return assembler_; }

  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }

  InstructionCodeGeneratorRISCV64* GetInstructionVisitor() override {
    return &instruction_visitor_;
  }

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, XRegister klass);

  void SetupBlockedRegisters() const override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  InstructionSet GetInstructionSet() const override { return InstructionSet::kRiscv64; }

  const Riscv64InstructionSetFeatures& GetInstructionSetFeatures() const;

  uint32_t GetPreferredSlotsAlignment() const override {
    return static_cast<uint32_t>(kRiscv64PointerSize);
  }

  void Finalize() override;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }

  bool NeedsTwoRegisters([[maybe_unused]] DataType::Type type) const override { return false; }

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;

  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  // Check if the desired_string_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, ArtMethod* method) override;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.img.rel.ro, .bss, or directly in the boot image.
  //
  // The 20-bit and 12-bit parts of the 32-bit PC-relative offset are patched separately,
  // necessitating two patches/infos. There can be more than two patches/infos if the
  // instruction supplying the high part is shared with e.g. a slow path, while the low
  // part is supplied by separate instructions, e.g.:
  //     auipc r1, high       // patch
  //     lwu   r2, low(r1)    // patch
  //     beqz  r2, slow_path
  //   back:
  //     ...
  //   slow_path:
  //     ...
  //     sw    r2, low(r1)    // patch
  //     j     back
  struct PcRelativePatchInfo : PatchInfo<Riscv64Label> {
    PcRelativePatchInfo(const DexFile* dex_file,
                        uint32_t off_or_idx,
                        const PcRelativePatchInfo* info_high)
        : PatchInfo<Riscv64Label>(dex_file, off_or_idx),
          pc_insn_label(info_high != nullptr ? &info_high->label : &label) {
      DCHECK_IMPLIES(info_high != nullptr, info_high->pc_insn_label == &info_high->label);
    }

    // Label of the instruction supplying the high part: `&info_high->label`, or our own
    // `&label` if this is the high part patch info itself.
    const Riscv64Label* pc_insn_label;

   private:
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
  };

  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
                                                  const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewAppImageMethodPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageJniEntrypointPatch(
      MethodReference target_method, const PcRelativePatchInfo* info_high = nullptr);

  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewAppImageTypePatch(const DexFile& dex_file,
                                            dex::TypeIndex type_index,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewTypeBssEntryPatch(HLoadClass* load_class,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index,
                                              const PcRelativePatchInfo* info_high = nullptr);

  void EmitPcRelativeAuipcPlaceholder(PcRelativePatchInfo* info_high, XRegister out);
  void EmitPcRelativeAddiPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
  void EmitPcRelativeLwuPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
  void EmitPcRelativeLdPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
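
  // A typical high/low pairing, mirroring the PcRelativePatchInfo comment above
  // (a sketch; the register and the indices are illustrative):
  //   PcRelativePatchInfo* info_high = NewBootImageTypePatch(dex_file, type_index);
  //   PcRelativePatchInfo* info_low = NewBootImageTypePatch(dex_file, type_index, info_high);
  //   EmitPcRelativeAuipcPlaceholder(info_high, reg);      // 20-bit high part: auipc
  //   EmitPcRelativeAddiPlaceholder(info_low, reg, reg);   // 12-bit low part: addi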

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;

  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);
  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const Literal* literal,
                       uint64_t index_in_table) const;
  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                       dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                      dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  void LoadTypeForBootImageIntrinsic(XRegister dest, TypeReference target_type);
  void LoadBootImageRelRoEntry(XRegister dest, uint32_t boot_image_offset);
  void LoadBootImageAddress(XRegister dest, uint32_t boot_image_reference);
  void LoadIntrinsicDeclaringClass(XRegister dest, HInvoke* invoke);
  void LoadClassRootForIntrinsic(XRegister dest, ClassRoot class_root);

  void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
                                  Location temp,
                                  SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(HInvokeVirtual* invoke,
                           Location temp,
                           SlowPathCode* slow_path = nullptr) override;
  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  void GenerateMemoryBarrier(MemBarrierKind kind);

  void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);

  bool CanUseImplicitSuspendCheck() const;

  // Create slow path for a Baker read barrier for a GC root load within `instruction`.
  SlowPathCodeRISCV64* AddGcRootBakerBarrierBarrierSlowPath(
      HInstruction* instruction, Location root, Location temp);

  // Emit marking check for a Baker read barrier for a GC root load within `instruction`.
  void EmitBakerReadBarierMarkingCheck(
      SlowPathCodeRISCV64* slow_path, Location root, Location temp);
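
  // Typical use of the pair above (a sketch; the real call sites live in the
  // accompanying .cc file, and `root`/`temp` are whatever locations the caller set up):
  //   SlowPathCodeRISCV64* slow_path =
  //       AddGcRootBakerBarrierBarrierSlowPath(instruction, root, temp);
  //   EmitBakerReadBarierMarkingCheck(slow_path, root, temp);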

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               XRegister obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option,
                               Riscv64Label* label_low = nullptr);

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             XRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             XRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and intrinsics.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 XRegister obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 Location temp,
                                                 bool needs_null_check);

  // Create slow path for a read barrier for a heap reference within `instruction`.
  //
  // This is a helper function for GenerateReadBarrierSlow() that has the same
  // arguments. The creation and adding of the slow path is exposed for intrinsics
  // that cannot use GenerateReadBarrierSlow() from their own slow paths.
  SlowPathCodeRISCV64* AddReadBarrierSlowPath(HInstruction* instruction,
                                              Location out,
                                              Location ref,
                                              Location obj,
                                              uint32_t offset,
                                              Location index);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  // Emit a write barrier if:
  // (A) `emit_null_check` is false, or
  // (B) `emit_null_check` is true and `value` is not null.
  void MaybeMarkGCCard(XRegister object, XRegister value, bool emit_null_check);
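
  // For example, after storing a reference field (a sketch; the register names are
  // illustrative): MaybeMarkGCCard(obj_reg, value_reg, /*emit_null_check=*/ value_can_be_null);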

  // Emit a write barrier unconditionally.
  void MarkGCCard(XRegister object);

  // Crash if the card table is not valid. This check is only emitted for the CC GC. We
  // assert `(!clean || !self->is_gc_marking)`, since for eliminated write barriers the
  // card table should not be set to clean while the CC GC is marking.
  void CheckGCCardIsValid(XRegister object);

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(XRegister reg);

  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(XRegister reg);

  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(XRegister reg);

  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(XRegister reg);

  void SwapLocations(Location loc1, Location loc2, DataType::Type type);

 private:
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
  using StringToLiteralMap =
      ArenaSafeMap<StringReference, Literal*, StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference, Literal*, TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value);
  Literal* DeduplicateUint64Literal(uint64_t value);

  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          const PcRelativePatchInfo* info_high,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  Riscv64Assembler assembler_;
  LocationsBuilderRISCV64 location_builder_;
  InstructionCodeGeneratorRISCV64 instruction_visitor_;
  Riscv64Label frame_entry_label_;

  // Labels for each block that will be compiled.
  Riscv64Label* block_labels_;  // Indexed by block id.

  ParallelMoveResolverRISCV64 move_resolver_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method addresses or
  // method code addresses.
  Uint64ToLiteralMap uint64_literals_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative public type patch info for kBssEntryPublic.
  ArenaDeque<PcRelativePatchInfo> public_type_bss_entry_patches_;
  // PC-relative package type patch info for kBssEntryPackage.
  ArenaDeque<PcRelativePatchInfo> package_type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative+kCallCriticalNative.
  ArenaDeque<PcRelativePatchInfo> boot_image_jni_entrypoint_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;

  // Patches for string root accesses in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;
};

}  // namespace riscv64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_