// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"

// Simulator specific helpers.
#if USE_SIMULATOR
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

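// Outside the condition range, the BranchType enumerators above are laid out
// in inverse pairs (always/never, reg_zero/reg_not_zero,
// reg_bit_clear/reg_bit_set), so the 'type ^ 1' below flips between the
// members of each pair.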
inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }

  static bool IsNearCallOffset(int64_t offset);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  inline void InitializeRootRegister();

  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const Register& rd, Smi smi);
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);

  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register lhs, Register rhs);
  void Swap(VRegister lhs, VRegister rhs);

// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmla, Fmla)                      \
  V(fmls, Fmls)                      \
  V(fmul, Fmul)                      \
  V(fmulx, Fmulx)                    \
  V(mul, Mul)                        \
  V(mla, Mla)                        \
  V(mls, Mls)                        \
  V(sqdmulh, Sqdmulh)                \
  V(sqrdmulh, Sqrdmulh)              \
  V(sqdmull, Sqdmull)                \
  V(sqdmull2, Sqdmull2)              \
  V(sqdmlal, Sqdmlal)                \
  V(sqdmlal2, Sqdmlal2)              \
  V(sqdmlsl, Sqdmlsl)                \
  V(sqdmlsl2, Sqdmlsl2)              \
  V(smull, Smull)                    \
  V(smull2, Smull2)                  \
  V(smlal, Smlal)                    \
  V(smlal2, Smlal2)                  \
  V(smlsl, Smlsl)                    \
  V(smlsl2, Smlsl2)                  \
  V(umull, Umull)                    \
  V(umull2, Umull2)                  \
  V(umlal, Umlal)                    \
  V(umlal2, Umlal2)                  \
  V(umlsl, Umlsl)                    \
  V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                   \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
            int vm_index) {                                                \
    DCHECK(allow_macro_instructions());                                    \
    ASM(vd, vn, vm, vm_index);                                             \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs)                    \
  V(addp, Addp)                  \
  V(addv, Addv)                  \
  V(cls, Cls)                    \
  V(clz, Clz)                    \
  V(cnt, Cnt)                    \
  V(faddp, Faddp)                \
  V(fcvtas, Fcvtas)              \
  V(fcvtau, Fcvtau)              \
  V(fcvtms, Fcvtms)              \
  V(fcvtmu, Fcvtmu)              \
  V(fcvtns, Fcvtns)              \
  V(fcvtnu, Fcvtnu)              \
  V(fcvtps, Fcvtps)              \
  V(fcvtpu, Fcvtpu)              \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxnmv, Fmaxnmv)            \
  V(fmaxp, Fmaxp)                \
  V(fmaxv, Fmaxv)                \
  V(fminnmp, Fminnmp)            \
  V(fminnmv, Fminnmv)            \
  V(fminp, Fminp)                \
  V(fminv, Fminv)                \
  V(fneg, Fneg)                  \
  V(frecpe, Frecpe)              \
  V(frecpx, Frecpx)              \
  V(frinta, Frinta)              \
  V(frinti, Frinti)              \
  V(frintm, Frintm)              \
  V(frintn, Frintn)              \
  V(frintp, Frintp)              \
  V(frintx, Frintx)              \
  V(frintz, Frintz)              \
  V(frsqrte, Frsqrte)            \
  V(fsqrt, Fsqrt)                \
  V(mov, Mov)                    \
  V(mvn, Mvn)                    \
  V(neg, Neg)                    \
  V(not_, Not)                   \
  V(rbit, Rbit)                  \
  V(rev16, Rev16)                \
  V(rev32, Rev32)                \
  V(rev64, Rev64)                \
  V(sadalp, Sadalp)              \
  V(saddlp, Saddlp)              \
  V(saddlv, Saddlv)              \
  V(smaxv, Smaxv)                \
  V(sminv, Sminv)                \
  V(sqabs, Sqabs)                \
  V(sqneg, Sqneg)                \
  V(sqxtn2, Sqxtn2)              \
  V(sqxtn, Sqxtn)                \
  V(sqxtun2, Sqxtun2)            \
  V(sqxtun, Sqxtun)              \
  V(suqadd, Suqadd)              \
  V(sxtl2, Sxtl2)                \
  V(sxtl, Sxtl)                  \
  V(uadalp, Uadalp)              \
  V(uaddlp, Uaddlp)              \
  V(uaddlv, Uaddlv)              \
  V(umaxv, Umaxv)                \
  V(uminv, Uminv)                \
  V(uqxtn2, Uqxtn2)              \
  V(uqxtn, Uqxtn)                \
  V(urecpe, Urecpe)              \
  V(ursqrte, Ursqrte)            \
  V(usqadd, Usqadd)              \
  V(uxtl2, Uxtl2)                \
  V(uxtl, Uxtl)                  \
  V(xtn2, Xtn2)                  \
  V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions());                 \
    ASM(vd, vn);                                        \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    DCHECK(allow_macro_instructions());                             \
    ASM(vd, vn, imm);                                               \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn2, Addhn2)              \
  V(addhn, Addhn)                \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(fnmul, Fnmul)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmull2, Pmull2)              \
  V(pmull, Pmull)                \
  V(pmul, Pmul)                  \
  V(raddhn2, Raddhn2)            \
  V(raddhn, Raddhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(rsubhn, Rsubhn)              \
  V(sabal2, Sabal2)              \
  V(sabal, Sabal)                \
  V(saba, Saba)                  \
  V(sabdl2, Sabdl2)              \
  V(sabdl, Sabdl)                \
  V(sabd, Sabd)                  \
  V(saddl2, Saddl2)              \
  V(saddl, Saddl)                \
  V(saddw2, Saddw2)              \
  V(saddw, Saddw)                \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smaxp, Smaxp)                \
  V(smax, Smax)                  \
  V(sminp, Sminp)                \
  V(smin, Smin)                  \
  V(smlal2, Smlal2)              \
  V(smlal, Smlal)                \
  V(smlsl2, Smlsl2)              \
  V(smlsl, Smlsl)                \
  V(smull2, Smull2)              \
  V(smull, Smull)                \
  V(sqadd, Sqadd)                \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqdmull, Sqdmull)            \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl2, Ssubl2)              \
  V(ssubl, Ssubl)                \
  V(ssubw2, Ssubw2)              \
  V(ssubw, Ssubw)                \
  V(subhn2, Subhn2)              \
  V(subhn, Subhn)                \
  V(sub, Sub)                    \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uabal2, Uabal2)              \
  V(uabal, Uabal)                \
  V(uaba, Uaba)                  \
  V(uabdl2, Uabdl2)              \
  V(uabdl, Uabdl)                \
  V(uabd, Uabd)                  \
  V(uaddl2, Uaddl2)              \
  V(uaddl, Uaddl)                \
  V(uaddw2, Uaddw2)              \
  V(uaddw, Uaddw)                \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umaxp, Umaxp)                \
  V(umax, Umax)                  \
  V(uminp, Uminp)                \
  V(umin, Umin)                  \
  V(umlal2, Umlal2)              \
  V(umlal, Umlal)                \
  V(umlsl2, Umlsl2)              \
  V(umlsl, Umlsl)                \
  V(umull2, Umull2)              \
  V(umull, Umull)                \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl2, Usubl2)              \
  V(usubl, Usubl)                \
  V(usubw2, Usubw2)              \
  V(usubw, Usubw)                \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    DCHECK(allow_macro_instructions());                                      \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L);

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);

  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);

  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);

  void Pacibsp() {
    DCHECK(allow_macro_instructions_);
    pacibsp();
  }
  void Autibsp() {
    DCHECK(allow_macro_instructions_);
    autibsp();
  }

  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     Register temp = temps.AcquireX();  // temp will be x16
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);  // Will override temp!
  //     __ Pacib1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     temps.Exclude(x16, x17);
  //     temps.Include(x10, x11);
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);
  //     __ Pacib1716();
  void Pacib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    pacib1716();
  }
  void Autib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    autib1716();
  }

  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Isb();
  inline void Csdb();

  // Removes the current frame and its arguments from the stack, preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Neither |callee_args_count| nor |caller_args_count| includes the
  // receiver. |callee_args_count| is not modified. |caller_args_count| is
  // trashed.
  void PrepareForTailCall(Register callee_args_count,
                          Register caller_args_count, Register scratch0,
                          Register scratch1);

  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register dst, const MemOperand& src);
  inline void SmiUntag(Register smi);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(AbortReason reason);

  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

  void Trap() override;
  void DebugBreak() override;

  // Print a message to stderr and abort execution.
  void Abort(AbortReason reason);

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char* format, CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);
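
  // For example (an illustrative sketch; the exact format string is up to the
  // caller):
  //
  //   __ Mov(x0, 42);
  //   __ Fmov(d0, 3.14);
  //   __ Printf("x0: %" PRId64 ", d0: %f\n", x0, d0);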

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  inline void Bind(Label* label,
                   BranchTargetIdentifier id = BranchTargetIdentifier::kNone);

  // Control-flow integrity:

  // Define a function entrypoint.
  inline void CodeEntry();
  // Define an exception handler.
  inline void ExceptionHandler();
  // Define an exception handler and bind a label.
  inline void BindExceptionHandler(Label* label);

  // Control-flow integrity:

  // Define a jump (BR) target.
  inline void JumpTarget();
  // Define a jump (BR) target and bind a label.
  inline void BindJumpTarget(Label* label);
  // Define a call (BLR) target. The target also allows tail calls (via BR)
  // when the target is x16 or x17.
  inline void CallTarget();
  // Define a jump/call target.
  inline void JumpOrCallTarget();
  // Define a jump/call target and bind a label.
  inline void BindJumpOrCallTarget(Label* label);

  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Move macros.
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);

  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Blr(const Register& xn);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void CmpTagged(const Register& rn, const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);

  // Emits a runtime assert that the stack pointer is aligned.
  void AssertSpAligned();

  // Copy slot_count stack slots from the stack offset specified by src to
  // the stack offset specified by dst. The offsets and count are expressed in
  // slot-sized units. Offset dst must be less than src, or the gap between
  // them must be greater than or equal to slot_count, otherwise the result is
  // unpredictable. The function may corrupt its register arguments. The
  // registers must not alias each other.
  void CopySlots(int dst, Register src, Register slot_count);
  void CopySlots(Register dst, Register src, Register slot_count);

  // Copy count double words from the address in register src to the address
  // in register dst. There are three modes for this function:
  // 1) Address dst must be less than src, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. This is the default mode.
  // 2) Address src must be less than dst, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. In this mode, src and dst specify the last (highest)
  //    address of the regions to copy from and to.
  // 3) The same as mode 1, but the words are copied in the reversed order.
  // The case where src == dst is not supported.
  // The function may corrupt its register arguments. The registers must not
  // alias each other.
  enum CopyDoubleWordsMode {
    kDstLessThanSrc,
    kSrcLessThanDst,
    kDstLessThanSrcAndReverse
  };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
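
  // For example (an illustrative sketch; x0, x1 and x2 are arbitrary choices):
  //
  //   __ SlotAddress(x0, 0);           // dst: lowest slot of the region.
  //   __ SlotAddress(x1, 8);           // src: eight slots above it.
  //   __ Mov(x2, 4);                   // Copy four double words.
  //   __ CopyDoubleWords(x0, x1, x2);  // Default mode: kDstLessThanSrc.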

  // Calculate the address of a double word-sized slot at slot_offset from the
  // stack pointer, and write it to dst. Positive slot_offsets are at addresses
  // greater than sp, with slot zero at sp.
  void SlotAddress(Register dst, int slot_offset);
  void SlotAddress(Register dst, Register slot_offset);

  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Operand& imm);

  // Claim or drop stack space.
  //
  // On Windows, Claim will write a value every 4k, as is required by the stack
  // expansion mechanism.
  //
  // The stack pointer must be aligned to 16 bytes and the size claimed or
  // dropped must be a multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
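
  // For example (an illustrative sketch), reserving and then releasing two
  // X-sized slots:
  //
  //   __ Claim(2);  // sp -= 2 * kXRegSize, keeping sp 16-byte aligned.
  //   ...
  //   __ Drop(2);   // sp += 2 * kXRegSize.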

  // Drop 'count' arguments from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  // We assume the size of the arguments is the pointer size.
  // An optional mode argument is passed, which can indicate we need to
  // explicitly add the receiver to the count.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  inline void DropArguments(const Register& count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
  inline void DropArguments(int64_t count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);

  // Drop 'count' slots from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  inline void DropSlots(int64_t count);

  // Push a single argument, with padding, to the stack.
  inline void PushArgument(const Register& arg);

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // Other than the registers passed into Pop, the stack pointer, (possibly)
  // the system stack pointer and (possibly) the link register, these methods
  // do not modify any other registers.
  //
  // Some of the methods take an optional LoadLRMode or StoreLRMode template
  // argument, which specifies whether we need to sign the link register at the
  // start of the operation, or authenticate it at the end of the operation,
  // when control flow integrity measures are enabled.
  // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
  // argument to the operation.
  enum LoadLRMode { kAuthLR, kDontLoadLR };
  enum StoreLRMode { kSignLR, kDontStoreLR };
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const Register& src0, const VRegister& src1);
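
  // For example (an illustrative sketch):
  //
  //   __ Push(x0, x1);  // Functionally equivalent to Push(x0); Push(x1).
  //   ...
  //   __ Pop(x1, x0);   // Restores both registers in reverse order.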

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<HeapObject> object);
  inline void Push(Smi smi);

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Address wasm_target);
  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  // For a given |object| and |offset|:
  //   - Move |object| to |dst_object|.
  //   - Compute the address of the slot pointed to by |offset| in |object| and
  //     write it to |dst_slot|.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  //
  // The methods take an optional LoadLRMode or StoreLRMode template argument.
  // When control flow integrity measures are enabled and the link register is
  // included in 'registers', passing kSignLR to PushCPURegList will sign the
  // link register before pushing the list, and passing kAuthLR to
  // PopCPURegList will authenticate it after popping the list.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void PushCPURegList(CPURegList registers);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void PopCPURegList(CPURegList registers);
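
  // For example (an illustrative sketch; the register choice is arbitrary):
  //
  //   CPURegList saved(x19, x20, x21, x22);
  //   __ PushCPURegList(saved);
  //   ...
  //   __ PopCPURegList(saved);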

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller-saved registers onto the stack, and return the number of bytes
  // by which the stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller-saved registers from the stack, and return the number of
  // bytes by which the stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);

  void CheckPageFlag(const Register& object, int mask, Condition cc,
                     Label* condition_met);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                               Condition cond, Label* label);
  inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
                                     Condition cond, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);
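
  // For example (an illustrative sketch, with 'label' a previously declared
  // Label), branching if bit 0 or bit 4 of x0 is set:
  //
  //   __ TestAndBranchIfAnySet(x0, (1 << 0) | (1 << 4), &label);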

  inline void Brk(int code);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, VRegister fn);

  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference) override;

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  // Load the builtin given by the Smi in |builtin_index| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin_index);
  void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
                                 Register destination);
  void CallBuiltinByIndex(Register builtin_index) override;
  void CallBuiltin(int builtin_index);

  void LoadCodeObjectEntry(Register destination, Register code_object) override;
  void CallCodeObject(Register code_object) override;
  void JumpCodeObject(Register code_object) override;

  // Generates an instruction sequence such that the return address points to
  // the instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
                             DeoptimizeKind kind,
                             Label* jump_deoptimization_entry_label);

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode,
                         LinkRegisterStatus lr_status);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  void Fjcvtzs(const Register& rd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    DCHECK(!rd.IsZero());
    fjcvtzs(rd, vn);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);
  inline void CcmpTagged(const Register& rn, const Operand& operand,
                         StatusFlags nzcv, Condition cond);

  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  // When the optional template argument is kSignLR and control flow integrity
  // measures are enabled, we sign the link register before poking it onto the
  // stack. 'src' must be lr in this case.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  // When the optional template argument is kAuthLR and control flow integrity
  // measures are enabled, we authenticate the link register after peeking the
  // value. 'dst' must be lr in this case.
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Peek(const CPURegister& dst, const Operand& offset);
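
  // For example (an illustrative sketch), spilling x0 to the second slot and
  // reloading it later:
  //
  //   __ Poke(x0, 1 * kXRegSize);
  //   ...
  //   __ Peek(x0, 1 * kXRegSize);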

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  int ActivationFrameAlignment();

  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }
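
  // For example (an illustrative sketch), broadcasting w0 to all four S lanes
  // of v0:
  //
  //   __ Dup(v0.V4S(), w0);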

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshll, Sshll)                      \
  V(sshll2, Sshll2)                    \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushll, Ushll)                      \
  V(ushll2, Ushll2)                    \
  V(ushr, Ushr)                        \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions());                            \
    ASM(vd, vn, shift);                                            \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }
1261 
1262 // Load-acquire/store-release macros.
1263 #define DECLARE_FUNCTION(FN, OP) \
1264   inline void FN(const Register& rt, const Register& rn);
1265   LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
1266 #undef DECLARE_FUNCTION
1267 
1268   // Load an object from the root table.
1269   void LoadRoot(Register destination, RootIndex index) override;
1270 
1271   inline void Ret(const Register& xn = lr);
1272 
  // Perform a conversion from a double to a signed int64. If the input fits in
  // the range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
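  //
  // Illustrative use only (the label and registers here are hypothetical):
  //
  //   Label done;
  //   __ TryConvertDoubleToInt64(x0, d0, &done);
  //   // Fall-through: the conversion overflowed; handle that case here.
  //   __ Bind(&done);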
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);

  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);

  // Prologue claims an extra slot due to arm64's alignment constraints.
  static constexpr int kExtraSlotClaimedByPrologue = 1;
  // Generates function prologue code.
  void Prologue();

  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }

  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
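  //
  // Illustrative use only (the label name is hypothetical):
  //
  //   Label not_representable;
  //   __ Abs(x0, x1, &not_representable);
  //   // Fall-through: x0 holds abs(x1).
  //   __ Bind(&not_representable);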
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);

  inline void Cls(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Fcvtns(const Register& rd, const VRegister& fn);
  inline void Fcvtnu(const Register& rd, const VRegister& fn);
  inline void Fcvtms(const Register& rd, const VRegister& fn);
  inline void Fcvtmu(const Register& rd, const VRegister& fn);
  inline void Fcvtas(const Register& rd, const VRegister& fn);
  inline void Fcvtau(const Register& rd, const VRegister& fn);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(const Register& rd);

  void ResetSpeculationPoisonRegister();

  // ---------------------------------------------------------------------------
  // Pointer compression Support

  // Loads a field containing a HeapObject and decompresses it if pointer
  // compression is enabled.
  void LoadTaggedPointerField(const Register& destination,
                              const MemOperand& field_operand);

  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadAnyTaggedField(const Register& destination,
                          const MemOperand& field_operand);

  // Loads a field containing a smi value and untags it.
  void SmiUntagField(Register dst, const MemOperand& src);

  // Compresses and stores a tagged value to a given on-heap location.
  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand);

  void DecompressTaggedSigned(const Register& destination,
                              const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const Register& source);
  void DecompressAnyTagged(const Register& destination,
                           const MemOperand& field_operand);

  // Restore FP and LR from the values stored in the current frame. This will
  // authenticate the LR when pointer authentication is enabled.
  void RestoreFPAndLR();

  void StoreReturnAddressInWasmExitFrame(Label* return_location);

 protected:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size, const CPURegister& src0,
                  const CPURegister& src1, const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count, int size, const CPURegister& dst0,
                 const CPURegister& dst1, const CPURegister& dst2,
                 const CPURegister& dst3);

  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);

  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack must be prepared by the caller as for a normal AAPCS64
  // call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
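  //
  // A rough illustrative sketch, assuming the caller has already placed the
  // format string in x0 and a double argument in d0:
  //
  //   const CPURegister printf_args[] = {d0};
  //   CallPrintf(1, printf_args);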
  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);

 private:
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_ = true;
#endif

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_ = DefaultTmpList();
  CPURegList fptmp_list_ = DefaultFPTmpList();

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);

  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);

  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Handle<Code> code_target,
                           Address wasm_target);
};

class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);

  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);

  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn2(vd, vn);
  }
  inline void Fmadd(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fminnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fmsub(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fnmadd(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Fnmsub(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& src);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Nop() { nop(); }
  void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
            const int shift_amount = 0) {
    DCHECK(allow_macro_instructions());
    mvni(vd, imm8, shift, shift_amount);
  }
  inline void Rev(const Register& rd, const Register& rn);
  inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);

  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }

  void Ld1(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4r(vt, vt2, vt3, vt4, src);
  }
  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }

  // For the 'lr_mode' template argument of the following methods, see
  // PushCPURegList/PopCPURegList.
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushXRegList(RegList regs) {
    PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopXRegList(RegList regs) {
    PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushQRegList(RegList regs) {
    PushSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopQRegList(RegList regs) {
    PopSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes. The stack pointer must
  // be aligned to 16 bytes.
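  //
  // For instance (illustrative), with a 16-byte aligned sp:
  //
  //   PeekPair(x0, x1, 0);  // x0 <- sp[0..7], x1 <- sp[8..15].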
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // When control flow integrity measures are enabled, this method signs the
  // link register before pushing it.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
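  //
  // Typically paired with PopCalleeSavedRegisters(), e.g. (sketch):
  //
  //   PushCalleeSavedRegisters();
  //   // ... code that may clobber callee-saved registers ...
  //   PopCalleeSavedRegisters();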
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // When control flow integrity measures are enabled, this method
  // authenticates the link register after popping it.
  void PopCalleeSavedRegisters();

  // Helpers ------------------------------------------------------------------

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
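  // Illustrative use of DecodeField, where SomeField stands in for any
  // BitField-style type defining kShift and kMask:
  //
  //   DecodeField<SomeField>(x0, x1);  // x0 receives the decoded field of x1.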

  Operand ReceiverOperand(const Register arg_count);

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);

  inline void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object,
                    AbortReason reason = AbortReason::kOperandIsASmi);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---- Calling / Jumping helpers ----

  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeFlag flag);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeFlag flag);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeFlag flag);
  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeFlag flag);

  // ---- Code generation helpers ----

  // Frame restart support
  void MaybeDropFrames();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
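  //
  // Illustrative use (the label and instance type are examples only):
  //
  //   Label not_js_object;
  //   CompareObjectType(x0, x1, x2, JS_OBJECT_TYPE);
  //   B(ne, &not_js_object);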
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not).
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type.  This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register).  It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object, Register map, Register type_reg,
                        InstanceType type, Label* if_cond_pass,
                        Condition cond = eq);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, RootIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
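  // The single comparison works because, in unsigned arithmetic,
  // (value - lower_limit) <= (higher_limit - lower_limit) holds exactly when
  // value lies in [lower_limit, higher_limit].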
  void JumpIfIsInRange(const Register& value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);

  // ---------------------------------------------------------------------------
  // Frames.

  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The only registers modified by this function are the provided scratch
  // register, the frame pointer and the stack pointer.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new sp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
  //         sp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //   sp -> sp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles, const Register& scratch,
                      int extra_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);
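  //
  // Typically paired with LeaveExitFrame, e.g. (sketch; the scratch registers
  // here are placeholders):
  //
  //   EnterExitFrame(true, scratch);
  //   // ... call into C code ...
  //   LeaveExitFrame(true, scratch, scratch2);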

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if restore_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  void LeaveExitFrame(bool save_doubles, const Register& scratch,
                      const Register& scratch2);

  void LoadMap(Register dst, Register object);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
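  //
  // A representative (illustrative) store with write barrier:
  //
  //   StoreTaggedField(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, kLRHasNotBeenSaved,
  //                    kDontSaveFPRegs);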

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written. |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // ---------------------------------------------------------------------------
  // Debugging.

  void LoadNativeContextSlot(int index, Register dst);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts that the number of
// instructions emitted is what you specified when creating the scope.
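//
// For example (illustrative), to emit exactly two raw instructions:
//
//   {
//     InstructionAccurateScope scope(tasm, 2);
//     __ add(x0, x0, x1);
//     __ sub(x2, x2, x3);
//   }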
class InstructionAccurateScope {
 public:
  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
      : tasm_(tasm),
        block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
        ,
        size_(count * kInstrSize)
#endif
  {
    tasm_->CheckVeneerPool(false, true, count * kInstrSize);
    tasm_->StartBlockVeneerPool();
#ifdef DEBUG
    if (count != 0) {
      tasm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
    tasm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    tasm_->EndBlockVeneerPool();
#ifdef DEBUG
    if (start_.is_bound()) {
      DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  TurboAssembler* tasm_;
  TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};

// This scope utility allows scratch registers to be managed safely. The
// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested, but the destructors must run in the reverse
// order of the constructors. We do not have assertions for this.
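//
// For example (illustrative):
//
//   {
//     UseScratchRegisterScope temps(tasm);
//     Register scratch = temps.AcquireX();
//     // ... use scratch ...
//   }  // scratch is returned to the pool here.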
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(TurboAssembler* tasm)
      : available_(tasm->TmpList()),
        availablefp_(tasm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK_EQ(available_->type(), CPURegister::kRegister);
    DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
  }

  V8_EXPORT_PRIVATE ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
  VRegister AcquireV(VectorFormat format) {
    return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
  }

  Register AcquireSameSizeAs(const Register& reg);
  V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);

  void Include(const CPURegList& list) { available_->Combine(list); }
  void Exclude(const CPURegList& list) {
#if DEBUG
    CPURegList copy(list);
    while (!copy.IsEmpty()) {
      const CPURegister& reg = copy.PopHighestIndex();
      DCHECK(available_->IncludesAliasOf(reg));
    }
#endif
    available_->Remove(list);
  }
  void Include(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Include(list);
  }
  void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Exclude(list);
  }

 private:
  V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
      CPURegList* available);

  // Available scratch registers.
  CPURegList* available_;    // kRegister
  CPURegList* availablefp_;  // kVRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister
};

}  // namespace internal
}  // namespace v8

#define ACCESS_MASM(masm) masm->

#endif  // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_