/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH64_TARGET_H_
#define COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH64_TARGET_H_

#include "operands.h"
#include "encode.h"
#include "callconv.h"
#include "target_info.h"

#ifndef USE_VIXL_ARM64
#error "Wrong build type, please add VIXL in build"
#endif  // USE_VIXL_ARM64

namespace panda::compiler::aarch64 {
// Ensure that vixl has the same callee regs as our arch util
static constexpr auto CALLEE_REG_LIST =
    vixl::aarch64::CPURegList(vixl::aarch64::CPURegister::kRegister, vixl::aarch64::kXRegSize,
                              GetFirstCalleeReg(Arch::AARCH64, false), GetLastCalleeReg(Arch::AARCH64, false));
static constexpr auto CALLEE_VREG_LIST =
    vixl::aarch64::CPURegList(vixl::aarch64::CPURegister::kVRegister, vixl::aarch64::kDRegSize,
                              GetFirstCalleeReg(Arch::AARCH64, true), GetLastCalleeReg(Arch::AARCH64, true));
static constexpr auto CALLER_REG_LIST =
    vixl::aarch64::CPURegList(vixl::aarch64::CPURegister::kRegister, vixl::aarch64::kXRegSize,
                              GetCallerRegsMask(Arch::AARCH64, false).GetValue());
static constexpr auto CALLER_VREG_LIST = vixl::aarch64::CPURegList(
    vixl::aarch64::CPURegister::kVRegister, vixl::aarch64::kDRegSize, GetCallerRegsMask(Arch::AARCH64, true).GetValue());

static_assert(vixl::aarch64::kCalleeSaved.GetList() == CALLEE_REG_LIST.GetList());
static_assert(vixl::aarch64::kCalleeSavedV.GetList() == CALLEE_VREG_LIST.GetList());
static_assert(vixl::aarch64::kCallerSaved.GetList() == CALLER_REG_LIST.GetList());
static_assert(vixl::aarch64::kCallerSavedV.GetList() == CALLER_VREG_LIST.GetList());

const size_t MAX_SCALAR_PARAM_ID = 7;  // x0-x7
const size_t MAX_VECTOR_PARAM_ID = 7;  // v0-v7
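
// Illustration (standard AAPCS64 convention): for a call like foo(int64_t a, double b, int64_t c),
// the scalar arguments land in x0/x1 and the FP argument in d0. Only parameter ids in
// [0, MAX_SCALAR_PARAM_ID] / [0, MAX_VECTOR_PARAM_ID] are passed in registers;
// anything beyond goes to the stack.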

/**
 * Converters
 */
static inline vixl::aarch64::Condition Convert(const Condition CC)
{
    switch (CC) {
        case Condition::EQ:
            return vixl::aarch64::Condition::eq;
        case Condition::NE:
            return vixl::aarch64::Condition::ne;
        case Condition::LT:
            return vixl::aarch64::Condition::lt;
        case Condition::GT:
            return vixl::aarch64::Condition::gt;
        case Condition::LE:
            return vixl::aarch64::Condition::le;
        case Condition::GE:
            return vixl::aarch64::Condition::ge;
        case Condition::LO:
            return vixl::aarch64::Condition::lo;
        case Condition::LS:
            return vixl::aarch64::Condition::ls;
        case Condition::HI:
            return vixl::aarch64::Condition::hi;
        case Condition::HS:
            return vixl::aarch64::Condition::hs;
        // TODO(igorban) : Remove them
        case Condition::MI:
            return vixl::aarch64::Condition::mi;
        case Condition::PL:
            return vixl::aarch64::Condition::pl;
        case Condition::VS:
            return vixl::aarch64::Condition::vs;
        case Condition::VC:
            return vixl::aarch64::Condition::vc;
        case Condition::AL:
            return vixl::aarch64::Condition::al;
        case Condition::NV:
            return vixl::aarch64::Condition::nv;
        default:
            UNREACHABLE();
            return vixl::aarch64::Condition::eq;
    }
}
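
// Usage sketch (illustrative only; assumes a vixl MacroAssembler* `masm` is in scope):
// the converter lets encoder code stay in terms of the target-independent Condition enum, e.g.
//   masm->B(label, Convert(Condition::LT));  // emits b.lt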

static inline vixl::aarch64::Condition ConvertTest(const Condition CC)
{
    ASSERT(CC == Condition::TST_EQ || CC == Condition::TST_NE);
    return CC == Condition::TST_EQ ? vixl::aarch64::Condition::eq : vixl::aarch64::Condition::ne;
}

static inline vixl::aarch64::Shift Convert(const ShiftType TYPE)
{
    switch (TYPE) {
        case ShiftType::LSL:
            return vixl::aarch64::Shift::LSL;
        case ShiftType::LSR:
            return vixl::aarch64::Shift::LSR;
        case ShiftType::ASR:
            return vixl::aarch64::Shift::ASR;
        case ShiftType::ROR:
            return vixl::aarch64::Shift::ROR;
        default:
            UNREACHABLE();
    }
}

static inline vixl::aarch64::Register VixlReg(Reg reg)
{
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        size_t reg_size = reg.GetSize();
        if (reg_size < WORD_SIZE) {
            reg_size = WORD_SIZE;
        }
        if (reg_size > DOUBLE_WORD_SIZE) {
            reg_size = DOUBLE_WORD_SIZE;
        }
        auto vixl_reg = vixl::aarch64::Register(reg.GetId(), reg_size);
        ASSERT(vixl_reg.IsValid());
        return vixl_reg;
    }
    if (reg.GetId() == vixl::aarch64::sp.GetCode()) {
        return vixl::aarch64::sp;
    }

    // Invalid register type
    UNREACHABLE();
    return vixl::aarch64::xzr;
}
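
// Illustrative mapping (not part of the API): Reg(5, INT64_TYPE) converts to x5,
// Reg(5, INT32_TYPE) to w5, and sub-word sizes (8/16 bits) are widened to the
// 32-bit w-view, since AArch64 has no narrower register names:
//   VixlReg(Reg(5, INT8_TYPE));  // -> w5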

static inline vixl::aarch64::Register VixlReg(Reg reg, const uint8_t SIZE)
{
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        auto vixl_reg = vixl::aarch64::Register(reg.GetId(), (SIZE < WORD_SIZE ? WORD_SIZE : SIZE));
        ASSERT(vixl_reg.IsValid());
        return vixl_reg;
    }
    if (reg.GetId() == vixl::aarch64::sp.GetCode()) {
        return vixl::aarch64::sp;
    }

    // Invalid register type
    UNREACHABLE();
    return vixl::aarch64::xzr;
}

// Upper half of a 128-bit value held in a register pair
static inline vixl::aarch64::Register VixlRegU(Reg reg)
{
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        auto vixl_reg = vixl::aarch64::Register(reg.GetId() + 1, DOUBLE_WORD_SIZE);
        ASSERT(vixl_reg.IsValid());
        return vixl_reg;
    }

    // Invalid register type
    UNREACHABLE();
    return vixl::aarch64::xzr;
}

static inline vixl::aarch64::VRegister VixlVReg(Reg reg)
{
    ASSERT(reg.IsValid());
    auto vixl_vreg = vixl::aarch64::VRegister(reg.GetId(), reg.GetSize());
    ASSERT(vixl_vreg.IsValid());
    return vixl_vreg;
}

static inline vixl::aarch64::Operand VixlShift(Shift shift)
{
    Reg reg = shift.GetBase();
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        size_t reg_size = reg.GetSize();
        if (reg_size < WORD_SIZE) {
            reg_size = WORD_SIZE;
        }
        auto vixl_reg = vixl::aarch64::Register(reg.GetId(), reg_size);
        ASSERT(vixl_reg.IsValid());

        return vixl::aarch64::Operand(vixl_reg, Convert(shift.GetType()), shift.GetScale());
    }

    // Invalid register type
    UNREACHABLE();
}
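
// Usage sketch: a Shift bundles a base register with a shift kind and amount.
// Assuming the natural Shift(base, type, scale) constructor from operands.h,
// VixlShift(Shift(src1, ShiftType::LSL, 3)) yields the vixl operand `src1, LSL #3`,
// which the shifted-operand encoders below (e.g. EncodeAdd(dst, src0, Shift))
// can feed straight into instructions such as `add dst, src0, src1, lsl #3`.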

static inline vixl::aarch64::Operand VixlImm(const int64_t IMM)
{
    return vixl::aarch64::Operand(IMM);
}

static inline vixl::aarch64::Operand VixlImm(Imm imm)
{
    ASSERT(imm.IsValid());
    if (imm.GetType() == INT64_TYPE) {
        return vixl::aarch64::Operand(imm.GetValue<int64_t>());
    }
    if (imm.GetType() == INT32_TYPE) {
        return vixl::aarch64::Operand(imm.GetValue<int32_t>());
    }
    if (imm.GetType() == INT16_TYPE) {
        return vixl::aarch64::Operand(imm.GetValue<int16_t>());
    }
    if (imm.GetType() == INT8_TYPE) {
        return vixl::aarch64::Operand(imm.GetValue<int8_t>());
    }
    // Invalid immediate type
    UNREACHABLE();
    return vixl::aarch64::Operand(imm.GetValue<int8_t>());
}
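
// Note (informal): vixl::aarch64::Operand stores the immediate as a sign-extended
// 64-bit value, so the per-type dispatch above only serves to read the correctly
// typed value out of Imm; e.g. an 8-bit Imm holding -1 becomes the operand #-1,
// not #255.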

static inline vixl::aarch64::MemOperand ConvertMem(MemRef mem)
{
    bool base = mem.HasBase() && (mem.GetBase().GetId() != vixl::aarch64::xzr.GetCode());
    bool has_index = mem.HasIndex();
    bool shift = mem.HasScale();
    bool offset = mem.HasDisp();
    auto base_reg = Reg(mem.GetBase().GetId(), INT64_TYPE);
    if (base && !has_index && !shift) {
        // Default memory - base + offset
        if (mem.GetDisp() != 0) {
            auto disp = mem.GetDisp();
            return vixl::aarch64::MemOperand(VixlReg(base_reg), VixlImm(disp));
        }
        return vixl::aarch64::MemOperand(VixlReg(mem.GetBase(), DOUBLE_WORD_SIZE));
    }
    if (base && has_index && !offset) {
        auto scale = mem.GetScale();
        auto index_reg = mem.GetIndex();
        if (index_reg.GetSize() == WORD_SIZE) {
            // Zero-extend and shift the w-register in the index position
            return vixl::aarch64::MemOperand(VixlReg(base_reg), VixlReg(index_reg), vixl::aarch64::Extend::UXTW, scale);
        }
        if (scale != 0) {
            ASSERT(index_reg.GetSize() == DOUBLE_WORD_SIZE);
            return vixl::aarch64::MemOperand(VixlReg(base_reg), VixlReg(index_reg), vixl::aarch64::LSL, scale);
        }
        return vixl::aarch64::MemOperand(VixlReg(base_reg), VixlReg(index_reg));
    }
    // Unsupported MemRef combination - return an invalid memory operand
    auto tmp = vixl::aarch64::MemOperand();
    ASSERT(!tmp.IsValid());
    return tmp;
}
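
// Addressing forms produced above, for reference (any other MemRef combination
// yields the invalid MemOperand that callers are expected to check):
//   [base]                  - base only
//   [base, #disp]           - base plus displacement
//   [base, xindex]          - base plus 64-bit index
//   [base, xindex, LSL #n]  - base plus scaled 64-bit index
//   [base, windex, UXTW #n] - base plus zero-extended, scaled 32-bit index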

class Aarch64RegisterDescription final : public RegistersDescription {
public:
    explicit Aarch64RegisterDescription(ArenaAllocator *allocator);

    NO_MOVE_SEMANTIC(Aarch64RegisterDescription);
    NO_COPY_SEMANTIC(Aarch64RegisterDescription);
    ~Aarch64RegisterDescription() override = default;

    ArenaVector<Reg> GetCalleeSaved() override;
    void SetCalleeSaved(const ArenaVector<Reg> &regs) override;
    // Set used regs; affects the result of GetCalleeSaved
    void SetUsedRegs(const ArenaVector<Reg> &regs) override;

    RegMask GetCallerSavedRegMask() const override
    {
        return RegMask(caller_saved_.GetList());
    }

    VRegMask GetCallerSavedVRegMask() const override
    {
        return VRegMask(caller_savedv_.GetList());
    }

    bool IsCalleeRegister(Reg reg) override
    {
        bool is_fp = reg.IsFloat();
        return reg.GetId() >= GetFirstCalleeReg(Arch::AARCH64, is_fp) &&
               reg.GetId() <= GetLastCalleeReg(Arch::AARCH64, is_fp);
    }

    Reg GetZeroReg() const override
    {
        return Target(Arch::AARCH64).GetZeroReg();
    }

    bool IsZeroReg(Reg reg) const override
    {
        return reg.IsValid() && reg.IsScalar() && reg.GetId() == GetZeroReg().GetId();
    }

    Reg::RegIDType GetTempReg() override
    {
        return compiler::arch_info::arm64::TEMP_REGS.GetMaxRegister();
    }

    Reg::RegIDType GetTempVReg() override
    {
        return compiler::arch_info::arm64::TEMP_FP_REGS.GetMaxRegister();
    }

    RegMask GetDefaultRegMask() const override
    {
        RegMask reg_mask = compiler::arch_info::arm64::TEMP_REGS;
        reg_mask.set(Target(Arch::AARCH64).GetZeroReg().GetId());
        reg_mask.set(GetThreadReg(Arch::AARCH64));
        reg_mask.set(vixl::aarch64::x29.GetCode());
        reg_mask.set(vixl::aarch64::lr.GetCode());
        return reg_mask;
    }
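
    // Illustration: the default mask marks registers the register allocator must
    // never hand out - the encoder's temp registers, the zero register, the
    // thread register, the frame pointer (x29) and the link register (x30/lr).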

    VRegMask GetVRegMask() override
    {
        return compiler::arch_info::arm64::TEMP_FP_REGS;
    }

    // Check register mapping
    bool SupportMapping(uint32_t type) override
    {
        // Current implementation does not support reg-reg mapping
        if ((type & (RegMapping::VECTOR_VECTOR | RegMapping::FLOAT_FLOAT)) != 0U) {
            return false;
        }
        // Scalar and float registers live in different register files
        if ((type & (RegMapping::SCALAR_VECTOR | RegMapping::SCALAR_FLOAT)) != 0U) {
            return false;
        }
        return true;
    };

    bool IsValid() const override
    {
        return true;
    }

    bool IsRegUsed(ArenaVector<Reg> vec_reg, Reg reg) override;

public:
    // Special implementation-specific getters
    vixl::aarch64::CPURegList GetCalleeSavedR()
    {
        return callee_saved_;
    }
    vixl::aarch64::CPURegList GetCalleeSavedV()
    {
        return callee_savedv_;
    }
    vixl::aarch64::CPURegList GetCallerSavedR()
    {
        return caller_saved_;
    }
    vixl::aarch64::CPURegList GetCallerSavedV()
    {
        return caller_savedv_;
    }
    uint8_t GetAlignmentVreg(bool is_callee)
    {
        auto allignment_vreg = is_callee ? allignment_vreg_callee_ : allignment_vreg_caller_;
        // TODO(Pavel Ishin): fix if allignment_vreg == UNDEF_VREG
        ASSERT(allignment_vreg != UNDEF_VREG);

        return allignment_vreg;
    }

private:
    ArenaVector<Reg> used_regs_;

    vixl::aarch64::CPURegList callee_saved_ {vixl::aarch64::kCalleeSaved};
    vixl::aarch64::CPURegList caller_saved_ {vixl::aarch64::kCallerSaved};

    vixl::aarch64::CPURegList callee_savedv_ {vixl::aarch64::kCalleeSavedV};
    vixl::aarch64::CPURegList caller_savedv_ {vixl::aarch64::kCallerSavedV};

    static inline constexpr const uint8_t UNDEF_VREG = std::numeric_limits<uint8_t>::max();
    // The number of registers in a Push/Pop list must be even; these registers are used to pad vector register lists
    uint8_t allignment_vreg_callee_ {UNDEF_VREG};
    uint8_t allignment_vreg_caller_ {UNDEF_VREG};
};  // Aarch64RegisterDescription

class Aarch64Encoder;

class Aarch64LabelHolder final : public LabelHolder {
public:
    using LabelType = vixl::aarch64::Label;
    explicit Aarch64LabelHolder(Encoder *enc) : LabelHolder(enc), labels_(enc->GetAllocator()->Adapter()) {};

    NO_MOVE_SEMANTIC(Aarch64LabelHolder);
    NO_COPY_SEMANTIC(Aarch64LabelHolder);
    ~Aarch64LabelHolder() override = default;

    LabelId CreateLabel() override
    {
        ++id_;
        auto allocator = GetEncoder()->GetAllocator();
        auto *label = allocator->New<LabelType>(allocator);
        labels_.push_back(label);
        ASSERT(labels_.size() == id_);
        return id_ - 1;
    };

    void CreateLabels(LabelId size) override
    {
        for (LabelId i = 0; i <= size; ++i) {
            CreateLabel();
        }
    };

    void BindLabel(LabelId id) override;

    LabelType *GetLabel(LabelId id) const
    {
        ASSERT(labels_.size() > id);
        return labels_[id];
    }

    LabelId Size() override
    {
        return labels_.size();
    };

private:
    ArenaVector<LabelType *> labels_;
    LabelId id_ {0};
    friend Aarch64Encoder;
};  // Aarch64LabelHolder
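
// Typical label workflow, sketched (illustrative only; `encoder` is assumed to be
// an Aarch64Encoder using this holder):
//   auto label = encoder->GetLabels()->CreateLabel();
//   encoder->EncodeJump(label, src, Condition::EQ);  // forward branch, taken if src == 0
//   ...
//   encoder->GetLabels()->BindLabel(label);          // resolve the branch target here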

class Aarch64Encoder final : public Encoder {
public:
    explicit Aarch64Encoder(ArenaAllocator *allocator);

    LabelHolder *GetLabels() const override
    {
        ASSERT(labels_ != nullptr);
        return labels_;
    };

    ~Aarch64Encoder() override;

    NO_COPY_SEMANTIC(Aarch64Encoder);
    NO_MOVE_SEMANTIC(Aarch64Encoder);

    bool IsValid() const override
    {
        return true;
    }

    static constexpr auto GetTarget()
    {
        return panda::compiler::Target(Arch::AARCH64);
    }

    void LoadPcRelative(Reg reg, intptr_t offset, Reg reg_addr = INVALID_REGISTER);

    void SetMaxAllocatedBytes(size_t size) override
    {
        GetMasm()->GetBuffer()->SetMmapMaxBytes(size);
    }

#ifndef PANDA_MINIMAL_VIXL
    auto &GetDecoder() const;
#endif

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define UnaryOperation(opc) void Encode##opc(Reg dst, Reg src0) override;
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinaryOperationReg(opc) void Encode##opc(Reg dst, Reg src0, Reg src1) override;
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinaryOperationImm(opc) void Encode##opc(Reg dst, Reg src0, Imm src1) override;
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinaryOperation(opc) BinaryOperationReg(opc) BinaryOperationImm(opc)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define INST_DEF(OPCODE, TYPE) TYPE(OPCODE)

    ENCODE_MATH_LIST(INST_DEF)

#undef UnaryOperation
#undef BinaryOperation
#undef INST_DEF

    void EncodeNop() override;
    void CheckAlignment(MemRef mem, size_t size);

    // Additional special instructions
    void EncodeAdd(Reg dst, Reg src0, Shift src1) override;
    void EncodeSub(Reg dst, Reg src0, Shift src1) override;
    void EncodeAnd(Reg dst, Reg src0, Shift src1) override;
    void EncodeOr(Reg dst, Reg src0, Shift src1) override;
    void EncodeXor(Reg dst, Reg src0, Shift src1) override;
    void EncodeOrNot(Reg dst, Reg src0, Shift src1) override;
    void EncodeAndNot(Reg dst, Reg src0, Shift src1) override;
    void EncodeXorNot(Reg dst, Reg src0, Shift src1) override;
    void EncodeNeg(Reg dst, Shift src) override;

    void EncodeCast(Reg dst, bool dst_signed, Reg src, bool src_signed) override;
    void EncodeCastToBool(Reg dst, Reg src) override;

    void EncodeMin(Reg dst, bool dst_signed, Reg src0, Reg src1) override;
    void EncodeDiv(Reg dst, bool dst_signed, Reg src0, Reg src1) override;
    void EncodeMod(Reg dst, bool dst_signed, Reg src0, Reg src1) override;
    void EncodeMax(Reg dst, bool dst_signed, Reg src0, Reg src1) override;

    void EncodeAddOverflow(compiler::LabelHolder::LabelId id, Reg dst, Reg src0, Reg src1, Condition cc) override;
    void EncodeSubOverflow(compiler::LabelHolder::LabelId id, Reg dst, Reg src0, Reg src1, Condition cc) override;

    void EncodeLdr(Reg dst, bool dst_signed, MemRef mem) override;
    void EncodeLdrAcquire(Reg dst, bool dst_signed, MemRef mem) override;
    void EncodeLdrAcquireInvalid(Reg dst, bool dst_signed, MemRef mem);
    void EncodeLdrAcquireScalar(Reg dst, bool dst_signed, MemRef mem);

    void EncodeMov(Reg dst, Imm src) override;
    void EncodeStr(Reg src, MemRef mem) override;
    void EncodeStrRelease(Reg src, MemRef mem) override;

    void EncodeLdrExclusive(Reg dst, Reg addr, bool acquire) override;
    void EncodeStrExclusive(Reg dst, Reg src, Reg addr, bool release) override;

    // zeroed high part: [reg.size, 64)
    void EncodeStrz(Reg src, MemRef mem) override;
    void EncodeSti(Imm src, MemRef mem) override;
    // size must be 8, 16, 32 or 64
    void EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) override;
    // size must be 8, 16, 32 or 64
    // zeroed high part: [reg.size, 64)
    void EncodeMemCopyz(MemRef mem_from, MemRef mem_to, size_t size) override;

    void EncodeCmp(Reg dst, Reg src0, Reg src1, Condition cc) override;

    void EncodeCompare(Reg dst, Reg src0, Reg src1, Condition cc) override;
    void EncodeCompareTest(Reg dst, Reg src0, Reg src1, Condition cc) override;

    void EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Reg src3, Condition cc) override;
    void EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm, Condition cc) override;
    void EncodeSelectTest(Reg dst, Reg src0, Reg src1, Reg src2, Reg src3, Condition cc) override;
    void EncodeSelectTest(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm, Condition cc) override;

    void EncodeLdp(Reg dst0, Reg dst1, bool dst_signed, MemRef mem) override;

    void EncodeStp(Reg src0, Reg src1, MemRef mem) override;

    void EncodeMAdd(Reg dst, Reg src0, Reg src1, Reg src2) override;
    void EncodeMSub(Reg dst, Reg src0, Reg src1, Reg src2) override;

    void EncodeMNeg(Reg dst, Reg src0, Reg src1) override;
    void EncodeXorNot(Reg dst, Reg src0, Reg src1) override;
    void EncodeAndNot(Reg dst, Reg src0, Reg src1) override;
    void EncodeOrNot(Reg dst, Reg src0, Reg src1) override;

    void EncodeExtractBits(Reg dst, Reg src0, Imm imm1, Imm imm2) override;

    /* builtins-related encoders */
    void EncodeIsInf(Reg dst, Reg src) override;
    void EncodeBitCount(Reg dst, Reg src) override;
    void EncodeCountLeadingZeroBits(Reg dst, Reg src) override;
    void EncodeCountTrailingZeroBits(Reg dst, Reg src) override;
    void EncodeCeil([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeFloor([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeRint([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeRound([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;

    void EncodeStringEquals(Reg dst, Reg str1, Reg str2, bool compression, uint32_t length_offset,
                            uint32_t data_offset) override;

    void EncodeStringIndexOfAfter(Reg dst, Reg str, Reg character, Reg idx, Reg tmp, bool compression,
                                  uint32_t length_offset, uint32_t data_offset, int32_t char_const_value) override;

    void EncodeReverseBytes(Reg dst, Reg src) override;
    void EncodeReverseBits(Reg dst, Reg src) override;
    void EncodeRotate(Reg dst, Reg src1, Reg src2, bool is_ror) override;
    void EncodeSignum(Reg dst, Reg src) override;
    void EncodeCompressedStringCharAt(Reg dst, Reg str, Reg idx, Reg length, Reg tmp, size_t data_offset,
                                      uint32_t shift) override;
    void EncodeCompressedStringCharAtI(Reg dst, Reg str, Reg length, size_t data_offset, uint32_t index,
                                       uint32_t shift) override;

    void EncodeFpToBits(Reg dst, Reg src) override;
    void EncodeMoveBitsRaw(Reg dst, Reg src) override;
    void EncodeGetTypeSize(Reg size, Reg type) override;

    bool CanEncodeImmAddSubCmp(int64_t imm, uint32_t size, bool signed_compare) override;
    bool CanEncodeImmLogical(uint64_t imm, uint32_t size) override;
    bool CanEncodeScale(uint64_t imm, uint32_t size) override;

    void EncodeCompareAndSwap(Reg dst, Reg obj, Reg offset, Reg val, Reg newval) override;
    void EncodeUnsafeGetAndSet(Reg dst, Reg obj, Reg offset, Reg val) override;
    void EncodeUnsafeGetAndAdd(Reg dst, Reg obj, Reg offset, Reg val, Reg tmp) override;
    void EncodeMemoryBarrier(MemoryOrder::Order order) override;

    void EncodeStackOverflowCheck(ssize_t offset) override;
    void EncodeCrc32Update(Reg dst, Reg crc_reg, Reg val_reg) override;

    bool CanEncodeBitCount() override
    {
        return true;
    }

    bool CanEncodeCompressedStringCharAt() override
    {
        return true;
    }

    bool CanEncodeCompressedStringCharAtI() override
    {
        return true;
    }

    bool CanEncodeMAdd() override
    {
        return true;
    }
    bool CanEncodeMSub() override
    {
        return true;
    }
    bool CanEncodeMNeg() override
    {
        return true;
    }
    bool CanEncodeOrNot() override
    {
        return true;
    }
    bool CanEncodeAndNot() override
    {
        return true;
    }
    bool CanEncodeXorNot() override
    {
        return true;
    }
    bool CanEncodeShiftedOperand(ShiftOpcode opcode, ShiftType shift_type) override;

    size_t GetCursorOffset() const override
    {
        return GetMasm()->GetBuffer()->GetCursorOffset();
    }
    void SetCursorOffset(size_t offset) override
    {
        GetMasm()->GetBuffer()->Rewind(offset);
    }

    Reg AcquireScratchRegister(TypeInfo type) override;
    void AcquireScratchRegister(Reg reg) override;
    void ReleaseScratchRegister(Reg reg) override;
    bool IsScratchRegisterReleased(Reg reg) override;

    RegMask GetScratchRegistersMask() const override
    {
        return RegMask(GetMasm()->GetScratchRegisterList()->GetList());
    }

    RegMask GetScratchFpRegistersMask() const override
    {
        return RegMask(GetMasm()->GetScratchVRegisterList()->GetList());
    }

    RegMask GetAvailableScratchRegisters() const override
    {
        return RegMask(GetMasm()->GetScratchRegisterList()->GetList());
    }

    VRegMask GetAvailableScratchFpRegisters() const override
    {
        return VRegMask(GetMasm()->GetScratchVRegisterList()->GetList());
    }

    TypeInfo GetRefType() override
    {
        return INT64_TYPE;
    };

    size_t DisasmInstr(std::ostream &stream, size_t pc, ssize_t code_offset) const override;

    void *BufferData() const override
    {
        return GetMasm()->GetBuffer()->GetStartAddress<void *>();
    };

    size_t BufferSize() const override
    {
        return GetMasm()->GetBuffer()->GetSizeInBytes();
    };

    bool InitMasm() override;

    void Finalize() override;

    void MakeCall(compiler::RelocationInfo *relocation) override;
    void MakeCall(LabelHolder::LabelId id) override;
    void MakeCall(const void *entry_point) override;
    void MakeCall(MemRef entry_point) override;
    void MakeCall(Reg reg) override;

    void MakeCallAot(intptr_t offset) override;
    void MakeCallByOffset(intptr_t offset) override;
    void MakeLoadAotTable(intptr_t offset, Reg reg) override;
    void MakeLoadAotTableAddr(intptr_t offset, Reg addr, Reg val) override;
    bool CanMakeCallByOffset(intptr_t offset) override;

    // Encode unconditional branch
    void EncodeJump(LabelHolder::LabelId id) override;

    // Encode jump with compare to zero
    void EncodeJump(LabelHolder::LabelId id, Reg src, Condition cc) override;

    // Compare reg and immediate and branch
    void EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) override;

    // Compare two regs and branch
    void EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) override;

    // Test reg against immediate and branch
    void EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) override;

    // Test two regs and branch
    void EncodeJumpTest(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) override;

    // Encode jump by register value
    void EncodeJump(Reg dst) override;

    void EncodeJump(RelocationInfo *relocation) override;

    void EncodeBitTestAndBranch(LabelHolder::LabelId id, compiler::Reg reg, uint32_t bit_pos, bool bit_value) override;

    void EncodeAbort() override;

    void EncodeReturn() override;

    void MakeLibCall(Reg dst, Reg src0, Reg src1, const void *entry_point);

    void SaveRegisters(RegMask registers, ssize_t slot, size_t start_reg, bool is_fp) override
    {
        LoadStoreRegisters<true>(registers, slot, start_reg, is_fp);
    }
    void LoadRegisters(RegMask registers, ssize_t slot, size_t start_reg, bool is_fp) override
    {
        LoadStoreRegisters<false>(registers, slot, start_reg, is_fp);
    }

    void SaveRegisters(RegMask registers, bool is_fp, ssize_t slot, Reg base, RegMask mask) override
    {
        LoadStoreRegisters<true>(registers, is_fp, slot, base, mask);
    }
    void LoadRegisters(RegMask registers, bool is_fp, ssize_t slot, Reg base, RegMask mask) override
    {
        LoadStoreRegisters<false>(registers, is_fp, slot, base, mask);
    }
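
    // Usage sketch (illustrative values only; `base` is assumed to already hold
    // the frame or stack pointer): spill and refill x19/x20 at slots 0 and 1:
    //   RegMask callees {0x180000U};  // bits 19 and 20 -> x19, x20
    //   encoder->SaveRegisters(callees, false, 0, base, callees);
    //   ...
    //   encoder->LoadRegisters(callees, false, 0, base, callees);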

    void PushRegisters(RegMask registers, bool is_fp, bool align) override;
    void PopRegisters(RegMask registers, bool is_fp, bool align) override;

    vixl::aarch64::MacroAssembler *GetMasm() const
    {
        ASSERT(masm_ != nullptr);
        return masm_;
    }

    size_t GetLabelAddress(LabelHolder::LabelId label) override
    {
        auto plabel = labels_->GetLabel(label);
        ASSERT(plabel->IsBound());
        return GetMasm()->GetLabelAddress<size_t>(plabel);
    }

    bool LabelHasLinks(LabelHolder::LabelId label) override
    {
        auto plabel = labels_->GetLabel(label);
        return plabel->IsLinked();
    }

private:
    template <bool is_store>
    void LoadStoreRegisters(RegMask registers, ssize_t slot, size_t start_reg, bool is_fp);

    template <bool is_store>
    void LoadStoreRegistersLoop(RegMask registers, ssize_t slot, size_t start_reg, bool is_fp,
                                const vixl::aarch64::Register &base_reg);

    template <bool is_store>
    void LoadStoreRegisters(RegMask registers, bool is_fp, int32_t slot, Reg base, RegMask mask);

    void EncodeCastFloat(Reg dst, bool dst_signed, Reg src, bool src_signed);
    // This function is currently unused, but it works and may be used later.
    // Unlike "EncodeCastFloat", it implements casts from float32/64 to int8/16.
    void EncodeCastFloatWithSmallDst(Reg dst, bool dst_signed, Reg src, bool src_signed);

    void EncodeCastScalar(Reg dst, bool dst_signed, Reg src, bool src_signed);

    void EncodeCastSigned(Reg dst, Reg src);
    void EncodeCastUnsigned(Reg dst, Reg src);

    void EncodeCastCheckNaN(Reg dst, Reg src, LabelHolder::LabelId exit_id);

    // helpers to split generation logic
    void IndexOfHandleLatin1Case(Reg str, Reg character, Reg idx, Reg tmp, bool compression, uint32_t data_offset,
                                 const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                 const vixl::aarch64::Register &tmp3, vixl::aarch64::Label *label_found,
                                 vixl::aarch64::Label *label_not_found);
    void IndexOfHandleUtf16NormalCase(Reg str, Reg character, Reg idx, Reg tmp, bool compression, uint32_t data_offset,
                                      const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                      const vixl::aarch64::Register &tmp3, vixl::aarch64::Label *label_found,
                                      vixl::aarch64::Label *label_not_found);
    void IndexOfHandleSurrogateCase(Reg str, Reg character, Reg idx, Reg tmp, bool compression, uint32_t data_offset,
                                    const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                    const vixl::aarch64::Register &tmp3, vixl::aarch64::Label *label_found,
                                    vixl::aarch64::Label *label_not_found);

    void EncodeStringEqualsMainLoop(Reg dst, Reg str1, Reg str2, Reg tmp1_scoped, Reg tmp2_scoped, Reg tmp3_scoped,
                                    vixl::aarch64::Label *label_false, vixl::aarch64::Label *label_cset,
                                    uint32_t data_offset);

    void IndexOfHandleLatin1CaseMainLoop(Reg str, Reg character, Reg tmp, uint32_t data_offset,
                                         const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                         const vixl::aarch64::Register &tmp3, vixl::aarch64::Label *label_found,
                                         vixl::aarch64::Label *label_not_found, vixl::aarch64::Label *label_small_loop);

    void IndexOfHandleUtf16NormalCaseMainLoop(Reg str, Reg character, Reg tmp, uint32_t data_offset,
                                              const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                              const vixl::aarch64::Register &tmp3, vixl::aarch64::Label *label_found,
                                              vixl::aarch64::Label *label_not_found,
                                              vixl::aarch64::Label *label_small_loop);

    void EncodeStringIndexOfAfterMainCase(Reg dst, Reg str, Reg character, Reg idx, Reg tmp,
                                          const vixl::aarch64::Register &tmp1, const vixl::aarch64::Register &tmp2,
                                          const vixl::aarch64::Register &tmp3, bool compression, uint32_t data_offset,
                                          int32_t char_const_value, vixl::aarch64::Label *label_not_found);
    void EncodeFMod(Reg dst, Reg src0, Reg src1);
    void HandleChar(int32_t ch, const vixl::aarch64::Register &tmp, vixl::aarch64::Label *label_not_found,
                    vixl::aarch64::Label *label_uncompressed_string);

private:
    Aarch64LabelHolder *labels_ {nullptr};
    vixl::aarch64::MacroAssembler *masm_ {nullptr};
#ifndef PANDA_MINIMAL_VIXL
    mutable vixl::aarch64::Decoder *decoder_ {nullptr};
#endif
    bool lr_acquired_ {false};
};  // Aarch64Encoder

class Aarch64ParameterInfo : public ParameterInfo {
public:
    std::variant<Reg, uint8_t> GetNativeParam(const TypeInfo &type) override;
    Location GetNextLocation(DataType::Type type) override;
};

class Aarch64CallingConvention : public CallingConvention {
public:
    Aarch64CallingConvention(ArenaAllocator *allocator, Encoder *enc, RegistersDescription *descr, CallConvMode mode);
    NO_MOVE_SEMANTIC(Aarch64CallingConvention);
    NO_COPY_SEMANTIC(Aarch64CallingConvention);
    ~Aarch64CallingConvention() override = default;

    static constexpr auto GetTarget()
    {
        return panda::compiler::Target(Arch::AARCH64);
    }

    bool IsValid() const override
    {
        return true;
    }

    void GeneratePrologue(const FrameInfo &frame_info) override;
    void GenerateEpilogue(const FrameInfo &frame_info, std::function<void()> post_job) override;
    void GenerateNativePrologue(const FrameInfo &frame_info) override;
    void GenerateNativeEpilogue(const FrameInfo &frame_info, std::function<void()> post_job) override;

    void *GetCodeEntry() override;
    uint32_t GetCodeSize() override;

    Reg InitFlagsReg(bool has_float_regs);

    // Pushes the regs and returns the number of pushed registers (from both lists)
    size_t PushRegs(vixl::aarch64::CPURegList regs, vixl::aarch64::CPURegList vregs, bool is_callee);
    // Pops the regs and returns the number of popped registers (from both lists)
    size_t PopRegs(vixl::aarch64::CPURegList regs, vixl::aarch64::CPURegList vregs, bool is_callee);
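
    // Sketch of a typical prologue/epilogue pairing (illustrative only; assumes
    // GetRegfile() returns the Aarch64RegisterDescription in use):
    //   auto *descr = static_cast<Aarch64RegisterDescription *>(GetRegfile());
    //   size_t saved = PushRegs(descr->GetCalleeSavedR(), descr->GetCalleeSavedV(), true);
    //   ...
    //   PopRegs(descr->GetCalleeSavedR(), descr->GetCalleeSavedV(), true);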

    // Calculates parameter-passing information; the first regs_offset registers are reserved for special needs
    ParameterInfo *GetParameterInfo(uint8_t regs_offset) override;

    vixl::aarch64::MacroAssembler *GetMasm()
    {
        return (static_cast<Aarch64Encoder *>(GetEncoder()))->GetMasm();
    }
};  // Aarch64CallingConvention
}  // namespace panda::compiler::aarch64
#endif  // COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH64_TARGET_H_
896