// (code-browser navigation chrome removed: Home / Line# / Scopes# / Navigate /
//  Raw / Download — not part of the original source file)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
7 
8 #include <vector>
9 
10 #include "src/arm64/assembler-arm64.h"
11 #include "src/bailout-reason.h"
12 #include "src/base/bits.h"
13 #include "src/globals.h"
14 #include "src/turbo-assembler.h"
15 
// Simulator specific helpers.
// Each line below was prefixed with a code-browser line number, which made the
// preprocessor directives invalid; the numbers are removed here.
#if USE_SIMULATOR
  // TODO(all): If possible automatically prepend an indicator like
  // UNIMPLEMENTED or LOCATION.
  #define ASM_UNIMPLEMENTED(message)                                         \
  __ Debug(message, __LINE__, NO_PARAM)
  #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
  __ Debug(message, __LINE__,                                                \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
39 
40 
41 namespace v8 {
42 namespace internal {
43 
44 // Give alias names to registers for calling conventions.
45 constexpr Register kReturnRegister0 = x0;
46 constexpr Register kReturnRegister1 = x1;
47 constexpr Register kReturnRegister2 = x2;
48 constexpr Register kJSFunctionRegister = x1;
49 constexpr Register kContextRegister = cp;
50 constexpr Register kAllocateSizeRegister = x1;
51 constexpr Register kSpeculationPoisonRegister = x18;
52 constexpr Register kInterpreterAccumulatorRegister = x0;
53 constexpr Register kInterpreterBytecodeOffsetRegister = x19;
54 constexpr Register kInterpreterBytecodeArrayRegister = x20;
55 constexpr Register kInterpreterDispatchTableRegister = x21;
56 
57 constexpr Register kJavaScriptCallArgCountRegister = x0;
58 constexpr Register kJavaScriptCallCodeStartRegister = x2;
59 constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
60 constexpr Register kJavaScriptCallNewTargetRegister = x3;
61 constexpr Register kJavaScriptCallExtraArg1Register = x2;
62 
63 constexpr Register kOffHeapTrampolineRegister = ip0;
64 constexpr Register kRuntimeCallFunctionRegister = x1;
65 constexpr Register kRuntimeCallArgCountRegister = x0;
66 constexpr Register kRuntimeCallArgvRegister = x11;
67 constexpr Register kWasmInstanceRegister = x7;
68 
// X-macro list of the load/store macro instructions:
//   V(macro name, register type, register parameter name, load/store op)
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

// X-macro list of the load/store-pair macro instructions.
#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

// X-macro list of load-acquire / store-release macro instructions:
//   V(macro name, assembler mnemonic)
#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

// X-macro list of store-release exclusive macro instructions.
#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)
100 
101 // ----------------------------------------------------------------------------
102 // Static helper functions
103 
104 // Generate a MemOperand for loading a field from an object.
105 inline MemOperand FieldMemOperand(Register object, int offset);
106 
107 // ----------------------------------------------------------------------------
108 // MacroAssembler
109 
110 enum BranchType {
111   // Copies of architectural conditions.
112   // The associated conditions can be used in place of those, the code will
113   // take care of reinterpreting them with the correct type.
114   integer_eq = eq,
115   integer_ne = ne,
116   integer_hs = hs,
117   integer_lo = lo,
118   integer_mi = mi,
119   integer_pl = pl,
120   integer_vs = vs,
121   integer_vc = vc,
122   integer_hi = hi,
123   integer_ls = ls,
124   integer_ge = ge,
125   integer_lt = lt,
126   integer_gt = gt,
127   integer_le = le,
128   integer_al = al,
129   integer_nv = nv,
130 
131   // These two are *different* from the architectural codes al and nv.
132   // 'always' is used to generate unconditional branches.
133   // 'never' is used to not generate a branch (generally as the inverse
134   // branch type of 'always).
135   always, never,
136   // cbz and cbnz
137   reg_zero, reg_not_zero,
138   // tbz and tbnz
139   reg_bit_clear, reg_bit_set,
140 
141   // Aliases.
142   kBranchTypeFirstCondition = eq,
143   kBranchTypeLastCondition = nv,
144   kBranchTypeFirstUsingReg = reg_zero,
145   kBranchTypeFirstUsingBit = reg_bit_clear
146 };
147 
InvertBranchType(BranchType type)148 inline BranchType InvertBranchType(BranchType type) {
149   if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
150     return static_cast<BranchType>(
151         NegateCondition(static_cast<Condition>(type)));
152   } else {
153     return static_cast<BranchType>(type ^ 1);
154   }
155 }
156 
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
180 
181 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
182  public:
TurboAssembler(Isolate * isolate,const AssemblerOptions & options,void * buffer,int buffer_size,CodeObjectRequired create_code_object)183   TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
184                  void* buffer, int buffer_size,
185                  CodeObjectRequired create_code_object)
186       : TurboAssemblerBase(isolate, options, buffer, buffer_size,
187                            create_code_object) {}
188 
#if DEBUG
  // Debug-only gate checked (via DCHECK) by the macro instructions below, so
  // that raw assembler emission paths cannot call back into macro code.
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
195 
196   // We should not use near calls or jumps for JS->WASM calls and calls to
197   // external references, since the code spaces are not guaranteed to be close
198   // to each other.
CanUseNearCallOrJump(RelocInfo::Mode rmode)199   bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
200     return rmode != RelocInfo::JS_TO_WASM_CALL &&
201            rmode != RelocInfo::EXTERNAL_REFERENCE;
202   }
203 
204   // Activation support.
205   void EnterFrame(StackFrame::Type type);
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)206   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
207     // Out-of-line constant pool not implemented on arm64.
208     UNREACHABLE();
209   }
210   void LeaveFrame(StackFrame::Type type);
211 
212   inline void InitializeRootRegister();
213 
214   void Mov(const Register& rd, const Operand& operand,
215            DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
216   void Mov(const Register& rd, uint64_t imm);
Mov(const VRegister & vd,int vd_index,const VRegister & vn,int vn_index)217   void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
218            int vn_index) {
219     DCHECK(allow_macro_instructions());
220     mov(vd, vd_index, vn, vn_index);
221   }
Mov(const VRegister & vd,const VRegister & vn,int index)222   void Mov(const VRegister& vd, const VRegister& vn, int index) {
223     DCHECK(allow_macro_instructions());
224     mov(vd, vn, index);
225   }
Mov(const VRegister & vd,int vd_index,const Register & rn)226   void Mov(const VRegister& vd, int vd_index, const Register& rn) {
227     DCHECK(allow_macro_instructions());
228     mov(vd, vd_index, rn);
229   }
Mov(const Register & rd,const VRegister & vn,int vn_index)230   void Mov(const Register& rd, const VRegister& vn, int vn_index) {
231     DCHECK(allow_macro_instructions());
232     mov(rd, vn, vn_index);
233   }
234 
235   // This is required for compatibility with architecture independent code.
236   // Remove if not needed.
237   void Move(Register dst, Smi* src);
238 
239   // Register swap. Note that the register operands should be distinct.
240   void Swap(Register lhs, Register rhs);
241   void Swap(VRegister lhs, VRegister rhs);
242 
243 // NEON by element instructions.
244 #define NEON_BYELEMENT_MACRO_LIST(V) \
245   V(fmla, Fmla)                      \
246   V(fmls, Fmls)                      \
247   V(fmul, Fmul)                      \
248   V(fmulx, Fmulx)                    \
249   V(mul, Mul)                        \
250   V(mla, Mla)                        \
251   V(mls, Mls)                        \
252   V(sqdmulh, Sqdmulh)                \
253   V(sqrdmulh, Sqrdmulh)              \
254   V(sqdmull, Sqdmull)                \
255   V(sqdmull2, Sqdmull2)              \
256   V(sqdmlal, Sqdmlal)                \
257   V(sqdmlal2, Sqdmlal2)              \
258   V(sqdmlsl, Sqdmlsl)                \
259   V(sqdmlsl2, Sqdmlsl2)              \
260   V(smull, Smull)                    \
261   V(smull2, Smull2)                  \
262   V(smlal, Smlal)                    \
263   V(smlal2, Smlal2)                  \
264   V(smlsl, Smlsl)                    \
265   V(smlsl2, Smlsl2)                  \
266   V(umull, Umull)                    \
267   V(umull2, Umull2)                  \
268   V(umlal, Umlal)                    \
269   V(umlal2, Umlal2)                  \
270   V(umlsl, Umlsl)                    \
271   V(umlsl2, Umlsl2)
272 
273 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                   \
274   void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
275             int vm_index) {                                                \
276     DCHECK(allow_macro_instructions());                                    \
277     ASM(vd, vn, vm, vm_index);                                             \
278   }
279   NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
280 #undef DEFINE_MACRO_ASM_FUNC
281 
282 // NEON 2 vector register instructions.
283 #define NEON_2VREG_MACRO_LIST(V) \
284   V(abs, Abs)                    \
285   V(addp, Addp)                  \
286   V(addv, Addv)                  \
287   V(cls, Cls)                    \
288   V(clz, Clz)                    \
289   V(cnt, Cnt)                    \
290   V(faddp, Faddp)                \
291   V(fcvtas, Fcvtas)              \
292   V(fcvtau, Fcvtau)              \
293   V(fcvtms, Fcvtms)              \
294   V(fcvtmu, Fcvtmu)              \
295   V(fcvtns, Fcvtns)              \
296   V(fcvtnu, Fcvtnu)              \
297   V(fcvtps, Fcvtps)              \
298   V(fcvtpu, Fcvtpu)              \
299   V(fmaxnmp, Fmaxnmp)            \
300   V(fmaxnmv, Fmaxnmv)            \
301   V(fmaxp, Fmaxp)                \
302   V(fmaxv, Fmaxv)                \
303   V(fminnmp, Fminnmp)            \
304   V(fminnmv, Fminnmv)            \
305   V(fminp, Fminp)                \
306   V(fminv, Fminv)                \
307   V(fneg, Fneg)                  \
308   V(frecpe, Frecpe)              \
309   V(frecpx, Frecpx)              \
310   V(frinta, Frinta)              \
311   V(frinti, Frinti)              \
312   V(frintm, Frintm)              \
313   V(frintn, Frintn)              \
314   V(frintp, Frintp)              \
315   V(frintx, Frintx)              \
316   V(frintz, Frintz)              \
317   V(frsqrte, Frsqrte)            \
318   V(fsqrt, Fsqrt)                \
319   V(mov, Mov)                    \
320   V(mvn, Mvn)                    \
321   V(neg, Neg)                    \
322   V(not_, Not)                   \
323   V(rbit, Rbit)                  \
324   V(rev16, Rev16)                \
325   V(rev32, Rev32)                \
326   V(rev64, Rev64)                \
327   V(sadalp, Sadalp)              \
328   V(saddlp, Saddlp)              \
329   V(saddlv, Saddlv)              \
330   V(smaxv, Smaxv)                \
331   V(sminv, Sminv)                \
332   V(sqabs, Sqabs)                \
333   V(sqneg, Sqneg)                \
334   V(sqxtn2, Sqxtn2)              \
335   V(sqxtn, Sqxtn)                \
336   V(sqxtun2, Sqxtun2)            \
337   V(sqxtun, Sqxtun)              \
338   V(suqadd, Suqadd)              \
339   V(sxtl2, Sxtl2)                \
340   V(sxtl, Sxtl)                  \
341   V(uadalp, Uadalp)              \
342   V(uaddlp, Uaddlp)              \
343   V(uaddlv, Uaddlv)              \
344   V(umaxv, Umaxv)                \
345   V(uminv, Uminv)                \
346   V(uqxtn2, Uqxtn2)              \
347   V(uqxtn, Uqxtn)                \
348   V(urecpe, Urecpe)              \
349   V(ursqrte, Ursqrte)            \
350   V(usqadd, Usqadd)              \
351   V(uxtl2, Uxtl2)                \
352   V(uxtl, Uxtl)                  \
353   V(xtn2, Xtn2)                  \
354   V(xtn, Xtn)
355 
// Expands NEON_2VREG_MACRO_LIST into one two-operand macro instruction per
// entry; each checks allow_macro_instructions() before emitting.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions());                 \
    ASM(vd, vn);                                        \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST
364 
365 // NEON 2 vector register with immediate instructions.
366 #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
367   V(fcmeq, Fcmeq)                      \
368   V(fcmge, Fcmge)                      \
369   V(fcmgt, Fcmgt)                      \
370   V(fcmle, Fcmle)                      \
371   V(fcmlt, Fcmlt)
372 
373 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
374   void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
375     DCHECK(allow_macro_instructions());                             \
376     ASM(vd, vn, imm);                                               \
377   }
378   NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
379 #undef DEFINE_MACRO_ASM_FUNC
380 
381 // NEON 3 vector register instructions.
382 #define NEON_3VREG_MACRO_LIST(V) \
383   V(add, Add)                    \
384   V(addhn2, Addhn2)              \
385   V(addhn, Addhn)                \
386   V(addp, Addp)                  \
387   V(and_, And)                   \
388   V(bic, Bic)                    \
389   V(bif, Bif)                    \
390   V(bit, Bit)                    \
391   V(bsl, Bsl)                    \
392   V(cmeq, Cmeq)                  \
393   V(cmge, Cmge)                  \
394   V(cmgt, Cmgt)                  \
395   V(cmhi, Cmhi)                  \
396   V(cmhs, Cmhs)                  \
397   V(cmtst, Cmtst)                \
398   V(eor, Eor)                    \
399   V(fabd, Fabd)                  \
400   V(facge, Facge)                \
401   V(facgt, Facgt)                \
402   V(faddp, Faddp)                \
403   V(fcmeq, Fcmeq)                \
404   V(fcmge, Fcmge)                \
405   V(fcmgt, Fcmgt)                \
406   V(fmaxnmp, Fmaxnmp)            \
407   V(fmaxp, Fmaxp)                \
408   V(fminnmp, Fminnmp)            \
409   V(fminp, Fminp)                \
410   V(fmla, Fmla)                  \
411   V(fmls, Fmls)                  \
412   V(fmulx, Fmulx)                \
413   V(frecps, Frecps)              \
414   V(frsqrts, Frsqrts)            \
415   V(mla, Mla)                    \
416   V(mls, Mls)                    \
417   V(mul, Mul)                    \
418   V(orn, Orn)                    \
419   V(orr, Orr)                    \
420   V(pmull2, Pmull2)              \
421   V(pmull, Pmull)                \
422   V(pmul, Pmul)                  \
423   V(raddhn2, Raddhn2)            \
424   V(raddhn, Raddhn)              \
425   V(rsubhn2, Rsubhn2)            \
426   V(rsubhn, Rsubhn)              \
427   V(sabal2, Sabal2)              \
428   V(sabal, Sabal)                \
429   V(saba, Saba)                  \
430   V(sabdl2, Sabdl2)              \
431   V(sabdl, Sabdl)                \
432   V(sabd, Sabd)                  \
433   V(saddl2, Saddl2)              \
434   V(saddl, Saddl)                \
435   V(saddw2, Saddw2)              \
436   V(saddw, Saddw)                \
437   V(shadd, Shadd)                \
438   V(shsub, Shsub)                \
439   V(smaxp, Smaxp)                \
440   V(smax, Smax)                  \
441   V(sminp, Sminp)                \
442   V(smin, Smin)                  \
443   V(smlal2, Smlal2)              \
444   V(smlal, Smlal)                \
445   V(smlsl2, Smlsl2)              \
446   V(smlsl, Smlsl)                \
447   V(smull2, Smull2)              \
448   V(smull, Smull)                \
449   V(sqadd, Sqadd)                \
450   V(sqdmlal2, Sqdmlal2)          \
451   V(sqdmlal, Sqdmlal)            \
452   V(sqdmlsl2, Sqdmlsl2)          \
453   V(sqdmlsl, Sqdmlsl)            \
454   V(sqdmulh, Sqdmulh)            \
455   V(sqdmull2, Sqdmull2)          \
456   V(sqdmull, Sqdmull)            \
457   V(sqrdmulh, Sqrdmulh)          \
458   V(sqrshl, Sqrshl)              \
459   V(sqshl, Sqshl)                \
460   V(sqsub, Sqsub)                \
461   V(srhadd, Srhadd)              \
462   V(srshl, Srshl)                \
463   V(sshl, Sshl)                  \
464   V(ssubl2, Ssubl2)              \
465   V(ssubl, Ssubl)                \
466   V(ssubw2, Ssubw2)              \
467   V(ssubw, Ssubw)                \
468   V(subhn2, Subhn2)              \
469   V(subhn, Subhn)                \
470   V(sub, Sub)                    \
471   V(trn1, Trn1)                  \
472   V(trn2, Trn2)                  \
473   V(uabal2, Uabal2)              \
474   V(uabal, Uabal)                \
475   V(uaba, Uaba)                  \
476   V(uabdl2, Uabdl2)              \
477   V(uabdl, Uabdl)                \
478   V(uabd, Uabd)                  \
479   V(uaddl2, Uaddl2)              \
480   V(uaddl, Uaddl)                \
481   V(uaddw2, Uaddw2)              \
482   V(uaddw, Uaddw)                \
483   V(uhadd, Uhadd)                \
484   V(uhsub, Uhsub)                \
485   V(umaxp, Umaxp)                \
486   V(umax, Umax)                  \
487   V(uminp, Uminp)                \
488   V(umin, Umin)                  \
489   V(umlal2, Umlal2)              \
490   V(umlal, Umlal)                \
491   V(umlsl2, Umlsl2)              \
492   V(umlsl, Umlsl)                \
493   V(umull2, Umull2)              \
494   V(umull, Umull)                \
495   V(uqadd, Uqadd)                \
496   V(uqrshl, Uqrshl)              \
497   V(uqshl, Uqshl)                \
498   V(uqsub, Uqsub)                \
499   V(urhadd, Urhadd)              \
500   V(urshl, Urshl)                \
501   V(ushl, Ushl)                  \
502   V(usubl2, Usubl2)              \
503   V(usubl, Usubl)                \
504   V(usubw2, Usubw2)              \
505   V(usubw, Usubw)                \
506   V(uzp1, Uzp1)                  \
507   V(uzp2, Uzp2)                  \
508   V(zip1, Zip1)                  \
509   V(zip2, Zip2)
510 
511 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
512   void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
513     DCHECK(allow_macro_instructions());                                      \
514     ASM(vd, vn, vm);                                                         \
515   }
516   NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
517 #undef DEFINE_MACRO_ASM_FUNC
518 
519   void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
520     DCHECK(allow_macro_instructions());
521     bic(vd, imm8, left_shift);
522   }
523 
524   // This is required for compatibility in architecture independent code.
525   inline void jmp(Label* L);
526 
527   void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
528   inline void B(Label* label);
529   inline void B(Condition cond, Label* label);
530   void B(Label* label, Condition cond);
531 
532   void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
533   void Tbz(const Register& rt, unsigned bit_pos, Label* label);
534 
535   void Cbnz(const Register& rt, Label* label);
536   void Cbz(const Register& rt, Label* label);
537 
538   inline void Dmb(BarrierDomain domain, BarrierType type);
539   inline void Dsb(BarrierDomain domain, BarrierType type);
540   inline void Isb();
541   inline void Csdb();
542 
543   bool AllowThisStubCall(CodeStub* stub);
544   void CallStubDelayed(CodeStub* stub);
545 
546   // Call a runtime routine. This expects {centry} to contain a fitting CEntry
547   // builtin for the target runtime function and uses an indirect call.
548   void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
549 
550   // Removes current frame and its arguments from the stack preserving
551   // the arguments and a return address pushed to the stack for the next call.
552   // Both |callee_args_count| and |caller_args_count_reg| do not include
553   // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
554   // is trashed.
555   void PrepareForTailCall(const ParameterCount& callee_args_count,
556                           Register caller_args_count_reg, Register scratch0,
557                           Register scratch1);
558 
559   inline void SmiUntag(Register dst, Register src);
560   inline void SmiUntag(Register dst, const MemOperand& src);
561   inline void SmiUntag(Register smi);
562 
563   // Calls Abort(msg) if the condition cond is not satisfied.
564   // Use --debug_code to enable.
565   void Assert(Condition cond, AbortReason reason);
566 
567   // Like Assert(), but without condition.
568   // Use --debug_code to enable.
569   void AssertUnreachable(AbortReason reason);
570 
571   void AssertSmi(Register object,
572                  AbortReason reason = AbortReason::kOperandIsNotASmi);
573 
574   // Like Assert(), but always enabled.
575   void Check(Condition cond, AbortReason reason);
576 
577   inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
578 
579   // Print a message to stderr and abort execution.
580   void Abort(AbortReason reason);
581 
582   // Remaining instructions are simple pass-through calls to the assembler.
583   inline void Asr(const Register& rd, const Register& rn, unsigned shift);
584   inline void Asr(const Register& rd, const Register& rn, const Register& rm);
585 
586   // Try to move an immediate into the destination register in a single
587   // instruction. Returns true for success, and updates the contents of dst.
588   // Returns false, otherwise.
589   bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
590 
591   inline void Bind(Label* label);
592 
593   static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
594 
TmpList()595   CPURegList* TmpList() { return &tmp_list_; }
FPTmpList()596   CPURegList* FPTmpList() { return &fptmp_list_; }
597 
598   static CPURegList DefaultTmpList();
599   static CPURegList DefaultFPTmpList();
600 
601   // Move macros.
602   inline void Mvn(const Register& rd, uint64_t imm);
603   void Mvn(const Register& rd, const Operand& operand);
604   static bool IsImmMovn(uint64_t imm, unsigned reg_size);
605   static bool IsImmMovz(uint64_t imm, unsigned reg_size);
606 
607   void LogicalMacro(const Register& rd, const Register& rn,
608                     const Operand& operand, LogicalOp op);
609   void AddSubMacro(const Register& rd, const Register& rn,
610                    const Operand& operand, FlagsUpdate S, AddSubOp op);
611   inline void Orr(const Register& rd, const Register& rn,
612                   const Operand& operand);
613   void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
614     DCHECK(allow_macro_instructions());
615     orr(vd, imm8, left_shift);
616   }
617   inline void Orn(const Register& rd, const Register& rn,
618                   const Operand& operand);
619   inline void Eor(const Register& rd, const Register& rn,
620                   const Operand& operand);
621   inline void Eon(const Register& rd, const Register& rn,
622                   const Operand& operand);
623   inline void And(const Register& rd, const Register& rn,
624                   const Operand& operand);
625   inline void Ands(const Register& rd, const Register& rn,
626                    const Operand& operand);
627   inline void Tst(const Register& rn, const Operand& operand);
628   inline void Bic(const Register& rd, const Register& rn,
629                   const Operand& operand);
630   inline void Blr(const Register& xn);
631   inline void Cmp(const Register& rn, const Operand& operand);
632   inline void Subs(const Register& rd, const Register& rn,
633                    const Operand& operand);
634   void Csel(const Register& rd, const Register& rn, const Operand& operand,
635             Condition cond);
636 
637   // Emits a runtime assert that the stack pointer is aligned.
638   void AssertSpAligned();
639 
640   // Copy slot_count stack slots from the stack offset specified by src to
641   // the stack offset specified by dst. The offsets and count are expressed in
642   // slot-sized units. Offset dst must be less than src, or the gap between
643   // them must be greater than or equal to slot_count, otherwise the result is
644   // unpredictable. The function may corrupt its register arguments. The
645   // registers must not alias each other.
646   void CopySlots(int dst, Register src, Register slot_count);
647   void CopySlots(Register dst, Register src, Register slot_count);
648 
649   // Copy count double words from the address in register src to the address
650   // in register dst. There are two modes for this function:
651   // 1) Address dst must be less than src, or the gap between them must be
652   //    greater than or equal to count double words, otherwise the result is
653   //    unpredictable. This is the default mode.
654   // 2) Address src must be less than dst, or the gap between them must be
655   //    greater than or equal to count double words, otherwise the result is
656   //    undpredictable. In this mode, src and dst specify the last (highest)
657   //    address of the regions to copy from and to.
658   // The case where src == dst is not supported.
659   // The function may corrupt its register arguments. The registers must not
660   // alias each other.
661   enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
662   void CopyDoubleWords(Register dst, Register src, Register count,
663                        CopyDoubleWordsMode mode = kDstLessThanSrc);
664 
665   // Calculate the address of a double word-sized slot at slot_offset from the
666   // stack pointer, and write it to dst. Positive slot_offsets are at addresses
667   // greater than sp, with slot zero at sp.
668   void SlotAddress(Register dst, int slot_offset);
669   void SlotAddress(Register dst, Register slot_offset);
670 
671   // Load a literal from the inline constant pool.
672   inline void Ldr(const CPURegister& rt, const Operand& imm);
673 
674   // Claim or drop stack space without actually accessing memory.
675   //
676   // In debug mode, both of these will write invalid data into the claimed or
677   // dropped space.
678   //
679   // The stack pointer must be aligned to 16 bytes and the size claimed or
680   // dropped must be a multiple of 16 bytes.
681   //
682   // Note that unit_size must be specified in bytes. For variants which take a
683   // Register count, the unit size must be a power of two.
684   inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
685   inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
686   inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
687   inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
688 
689   // Drop 'count' arguments from the stack, rounded up to a multiple of two,
690   // without actually accessing memory.
691   // We assume the size of the arguments is the pointer size.
692   // An optional mode argument is passed, which can indicate we need to
693   // explicitly add the receiver to the count.
694   enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
695   inline void DropArguments(const Register& count,
696                             ArgumentsCountMode mode = kCountIncludesReceiver);
697   inline void DropArguments(int64_t count,
698                             ArgumentsCountMode mode = kCountIncludesReceiver);
699 
700   // Drop 'count' slots from stack, rounded up to a multiple of two, without
701   // actually accessing memory.
702   inline void DropSlots(int64_t count);
703 
704   // Push a single argument, with padding, to the stack.
705   inline void PushArgument(const Register& arg);
706 
707   // Add and sub macros.
708   inline void Add(const Register& rd, const Register& rn,
709                   const Operand& operand);
710   inline void Adds(const Register& rd, const Register& rn,
711                    const Operand& operand);
712   inline void Sub(const Register& rd, const Register& rn,
713                   const Operand& operand);
714 
715   // Abort execution if argument is not a positive or zero integer, enabled via
716   // --debug-code.
717   void AssertPositiveOrZero(Register value);
718 
719 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
720   inline void FN(const REGTYPE REG, const MemOperand& addr);
721   LS_MACRO_LIST(DECLARE_FUNCTION)
722 #undef DECLARE_FUNCTION
723 
  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  // Push a general-purpose register and a vector register as one 16-byte unit.
  void Push(const Register& src0, const VRegister& src1);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<HeapObject> object);
  inline void Push(Smi* smi);

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }

  // Spill and reload an arbitrary set of registers around a call.
  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  // Call the record-write stub (part of the write barrier) for 'object' and
  // the slot 'address', preserving registers as required by 'fp_mode'.
  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
771 
  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);

  // Load the page-flag word of the page containing 'object' into 'scratch' and
  // branch to 'if_any_set' if any bit in 'mask' is set.
  void CheckPageFlagSet(const Register& object, const Register& scratch,
                        int mask, Label* if_any_set);

  // As above, but branch to 'if_all_clear' if every bit in 'mask' is clear.
  void CheckPageFlagClear(const Register& object, const Register& scratch,
                          int mask, Label* if_all_clear);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (ie. not set.) May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);

  // Emit a BRK instruction with the given immediate 'code'.
  inline void Brk(int code);

  // Branch to 'smi_label' if 'value' is a Smi, otherwise to 'not_smi_label'
  // (when provided); falls through if the non-Smi label is null.
  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  // Compare 'x' against the 32-bit immediate 'y' and branch on eq / lt.
  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
  // Register-to-register and register-to-FP moves.
  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    // Other immediate types go through the double overload.
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, VRegister fn);

  // Move an immediate into a vector register, synthesising it with multiple
  // instructions if necessary.
  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  // Root-relative loads (overrides of the TurboAssemblerBase interface).
  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Unconditional or conditional jumps to a register, address or code object.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);

  // Calls to a register, address, code object or external reference.
  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  // Call a deoptimization entry; 'deopt_id' identifies the deopt point.
  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode);
869 
  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  // FP-to-integer conversions, rounding toward zero (signed / unsigned).
  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  // Multiply-add/subtract, negate and divide macros.
  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  // Shift and widening-multiply macros.
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  // Sign/zero extension and bitfield extraction macros.
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  // Scalar floating-point arithmetic and comparison macros.
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  // Bit-reverse and byte-reverse macros.
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);
943 
  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

  // Count leading zeros.
  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Signed bitfield extract / bitfield insert macros.
  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  // Integer-to-FP conversions (signed / unsigned), with optional fixed-point
  // fraction bits.
  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }
994 
  // Abort if the FPCR is not in the state the generated code expects
  // (debug-code only). Uses 'fpcr' as a scratch register if supplied.
  void AssertFPCRState(Register fpcr = NoReg);
  // Convert any NaN in 'src' to the canonical (quiet) NaN form in 'dst'.
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  // In-place variant of the above.
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  // Conditional select / set macros.
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  // FP precision conversion (e.g. double <-> float).
  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  // Returns the required stack alignment for activation frames, in bytes.
  int ActivationFrameAlignment();

  // Insert a vector element from another vector element or from a
  // general-purpose register.
  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  // Zero-extension macros.
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Duplicate a vector element, or a general-purpose register, across all
  // lanes of the destination vector.
  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }
1036 
// Declare the load/store-pair macros (Ldp, Stp, ...) from LSPAIR_MACRO_LIST.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

// NEON shift-by-immediate instructions taking two vector registers; each entry
// expands to a macro-assembler wrapper below.
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshll, Sshll)                      \
  V(sshll2, Sshll2)                    \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushll, Ushll)                      \
  V(ushll2, Ushll2)                    \
  V(ushr, Ushr)                        \
  V(usra, Usra)

// Each wrapper checks that macro instructions are allowed, then forwards to
// the raw assembler instruction.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions());                            \
    ASM(vd, vn, shift);                                            \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
1087 
  // Move a vector lane to a general-purpose register (unsigned).
  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  // Table lookup: select bytes from one to four source vectors using the
  // indices in 'vm'.
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  // Extract a vector from a pair of vectors, starting at byte 'index'.
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  // Move a vector lane to a general-purpose register (signed).
  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }
1121 
// Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index) override;

  // Return to the address in 'xn' (the link register by default).
  inline void Ret(const Register& xn = lr);

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);

  // Read a system register into 'rt'.
  inline void Mrs(const Register& rt, SystemRegister sysreg);

  // Generates function prologue code.
  void Prologue();

  // Vector compare against immediate (greater-than / greater-or-equal /
  // equal).
  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }

  // Negate, optionally setting flags.
  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);

  // Count-leading-sign-bits, conditional negate, byte reversals and
  // FP-to-integer conversions with various rounding modes.
  inline void Cls(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Fcvtns(const Register& rd, const VRegister& fn);
  inline void Fcvtnu(const Register& rd, const VRegister& fn);
  inline void Fcvtms(const Register& rd, const VRegister& fn);
  inline void Fcvtmu(const Register& rd, const VRegister& fn);
  inline void Fcvtas(const Register& rd, const VRegister& fn);
  inline void Fcvtau(const Register& rd, const VRegister& fn);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(const Register& rd);

  // Reset kSpeculationPoisonRegister to its neutral (all-ones) state.
  void ResetSpeculationPoisonRegister();
1192 
 protected:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size, const CPURegister& src0,
                  const CPURegister& src1, const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count, int size, const CPURegister& dst0,
                 const CPURegister& dst1, const CPURegister& dst2,
                 const CPURegister& dst3);

  // Shared implementation for the Ccmp/Ccmn family.
  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);

  // Shared implementation for the Adc/Sbc (add/sub with carry) family.
  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack must be prepared by the caller as for a normal AAPCS64
  // call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);

 private:
#if DEBUG
  // Tell whether any of the macro instruction can be used. When false the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_ = true;
#endif


  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_ = DefaultTmpList();
  CPURegList fptmp_list_ = DefaultFPTmpList();

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);

  // Helpers that synthesise a vector immediate of the given width.
  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  // Shared implementation for the single-register load/store macros.
  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

  // Shared implementation for the load/store-pair macros.
  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  // Whether 'offset' is within the range of a direct (near) call.
  static bool IsNearCallOffset(int64_t offset);
  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
1261 };
1262 
1263 class MacroAssembler : public TurboAssembler {
1264  public:
  // Convenience constructor using the isolate's default assembler options.
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}
  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);
1271 
  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);

  // Add/sub-with-carry macros, plus negate-with-carry.
  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);

  // Conditional compare-negative macro.
  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

// Declare the store-exclusive macros from STLX_MACRO_LIST.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));
1298 
  // Bitfield insert (low part) and conditional increment/invert/select macros.
  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);
  // Vector FP widening (Fcvtl/Fcvtl2) and narrowing (Fcvtn/Fcvtn2, and the
  // inexact-rounding Fcvtxn/Fcvtxn2) conversions; the '2' forms operate on the
  // upper half of the vector.
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn2(vd, vn);
  }
1336   inline void Fmadd(const VRegister& fd, const VRegister& fn,
1337                     const VRegister& fm, const VRegister& fa);
1338   inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
1339                      const VRegister& fm);
1340   inline void Fminnm(const VRegister& fd, const VRegister& fn,
1341                      const VRegister& fm);
1342   inline void Fmsub(const VRegister& fd, const VRegister& fn,
1343                     const VRegister& fm, const VRegister& fa);
1344   inline void Fnmadd(const VRegister& fd, const VRegister& fn,
1345                      const VRegister& fm, const VRegister& fa);
1346   inline void Fnmsub(const VRegister& fd, const VRegister& fn,
1347                      const VRegister& fm, const VRegister& fa);
1348   inline void Hint(SystemHint code);
1349   inline void Hlt(int code);
1350   inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
1351                    const MemOperand& src);
1352   inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
1353   inline void Msr(SystemRegister sysreg, const Register& rt);
Nop()1354   inline void Nop() { nop(); }
1355   void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
1356             const int shift_amount = 0) {
1357     DCHECK(allow_macro_instructions());
1358     mvni(vd, imm8, shift, shift_amount);
1359   }
1360   inline void Rev(const Register& rd, const Register& rn);
1361   inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
1362                     unsigned width);
1363   inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
1364                      const Register& ra);
1365   inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
1366                      const Register& ra);
1367   inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
1368   inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
1369                    const MemOperand& dst);
1370   inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
1371                      const Register& ra);
1372   inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
1373                      const Register& ra);
1374 
  // Vector integer compare (<= and <) against an immediate; DCHECK-guarded
  // wrappers around the raw cmle/cmlt instructions.
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }
1383 
  // NEON structure load wrappers (ld1-ld4, single-lane forms, and the
  // ld1r-ld4r replicate forms). Each overload forwards its register list,
  // optional lane index and source MemOperand to the raw assembler
  // instruction, after DCHECKing that macro instructions are allowed.
  void Ld1(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, vt4, src);
  }
  // Single-lane form: loads one element into the given lane of vt.
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4r(vt, vt2, vt3, vt4, src);
  }
  // NEON structure store wrappers (st1-st4 and their single-lane forms).
  // Each overload forwards its register list, optional lane index and
  // destination MemOperand to the raw assembler instruction, after
  // DCHECKing that macro instructions are allowed.
  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  // Single-lane form: stores one element from the given lane of vt.
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  // NEON table lookup extension wrappers (tbx), taking one to four table
  // registers (vn..vn4), an index register vm, and destination vd.
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }
1523 
1524   void LoadObject(Register result, Handle<Object> object);
1525 
1526   inline void PushSizeRegList(RegList registers, unsigned reg_size,
1527       CPURegister::RegisterType type = CPURegister::kRegister) {
1528     PushCPURegList(CPURegList(type, reg_size, registers));
1529   }
1530   inline void PopSizeRegList(RegList registers, unsigned reg_size,
1531       CPURegister::RegisterType type = CPURegister::kRegister) {
1532     PopCPURegList(CPURegList(type, reg_size, registers));
1533   }
PushXRegList(RegList regs)1534   inline void PushXRegList(RegList regs) {
1535     PushSizeRegList(regs, kXRegSizeInBits);
1536   }
PopXRegList(RegList regs)1537   inline void PopXRegList(RegList regs) {
1538     PopSizeRegList(regs, kXRegSizeInBits);
1539   }
PushWRegList(RegList regs)1540   inline void PushWRegList(RegList regs) {
1541     PushSizeRegList(regs, kWRegSizeInBits);
1542   }
PopWRegList(RegList regs)1543   inline void PopWRegList(RegList regs) {
1544     PopSizeRegList(regs, kWRegSizeInBits);
1545   }
PushDRegList(RegList regs)1546   inline void PushDRegList(RegList regs) {
1547     PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
1548   }
PopDRegList(RegList regs)1549   inline void PopDRegList(RegList regs) {
1550     PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
1551   }
PushSRegList(RegList regs)1552   inline void PushSRegList(RegList regs) {
1553     PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
1554   }
PopSRegList(RegList regs)1555   inline void PopSRegList(RegList regs) {
1556     PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
1557   }
1558 
1559   // Push the specified register 'count' times.
1560   void PushMultipleTimes(CPURegister src, Register count);
1561 
1562   // Sometimes callers need to push or pop multiple registers in a way that is
1563   // difficult to structure efficiently for fixed Push or Pop calls. This scope
1564   // allows push requests to be queued up, then flushed at once. The
1565   // MacroAssembler will try to generate the most efficient sequence required.
1566   //
1567   // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
1568   // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}

    // All queued registers must have been flushed (via PushQueued or
    // PopQueued) before the queue is destroyed.
    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    // Add a register to the pending set and account for its size.
    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    // Flush the queued registers as pushes/pops. Defined out of line.
    void PushQueued();
    void PopQueued();

   private:
    MacroAssembler* masm_;             // Assembler receiving the generated code.
    int size_;                         // Total size in bytes of queued registers.
    std::vector<CPURegister> queued_;  // Registers pending push/pop.
  };
1590 
1591   // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
1592   // values peeked will be adjacent, with the value in 'dst2' being from a
1593   // higher address than 'dst1'. The offset is in bytes. The stack pointer must
1594   // be aligned to 16 bytes.
1595   void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
1596 
1597   // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
1598   // register.
1599   inline void ClaimBySMI(const Register& count_smi,
1600                          uint64_t unit_size = kXRegSize);
1601   inline void DropBySMI(const Register& count_smi,
1602                         uint64_t unit_size = kXRegSize);
1603 
1604   // Compare a register with an operand, and branch to label depending on the
1605   // condition. May corrupt the status flags.
1606   inline void CompareAndBranch(const Register& lhs,
1607                                const Operand& rhs,
1608                                Condition cond,
1609                                Label* label);
1610 
1611   // Insert one or more instructions into the instruction stream that encode
1612   // some caller-defined data. The instructions used will be executable with no
1613   // side effects.
1614   inline void InlineData(uint64_t data);
1615 
1616   // Insert an instrumentation enable marker into the instruction stream.
1617   inline void EnableInstrumentation();
1618 
1619   // Insert an instrumentation disable marker into the instruction stream.
1620   inline void DisableInstrumentation();
1621 
1622   // Insert an instrumentation event marker into the instruction stream. These
1623   // will be picked up by the instrumentation system to annotate an instruction
1624   // profile. The argument marker_name must be a printable two character string;
1625   // it will be encoded in the event marker.
1626   inline void AnnotateInstrumentation(const char* marker_name);
1627 
1628   // Preserve the callee-saved registers (as defined by AAPCS64).
1629   //
1630   // Higher-numbered registers are pushed before lower-numbered registers, and
1631   // thus get higher addresses.
1632   // Floating-point registers are pushed before general-purpose registers, and
1633   // thus get higher addresses.
1634   //
1635   // Note that registers are not checked for invalid values. Use this method
1636   // only if you know that the GC won't try to examine the values on the stack.
1637   void PushCalleeSavedRegisters();
1638 
1639   // Restore the callee-saved registers (as defined by AAPCS64).
1640   //
1641   // Higher-numbered registers are popped after lower-numbered registers, and
1642   // thus come from higher addresses.
1643   // Floating-point registers are popped after general-purpose registers, and
1644   // thus come from higher addresses.
1645   void PopCalleeSavedRegisters();
1646 
1647   // Helpers ------------------------------------------------------------------
1648 
1649   static int SafepointRegisterStackIndex(int reg_code);
1650 
1651   template<typename Field>
DecodeField(Register dst,Register src)1652   void DecodeField(Register dst, Register src) {
1653     static const int shift = Field::kShift;
1654     static const int setbits = CountSetBits(Field::kMask, 32);
1655     Ubfx(dst, src, shift, setbits);
1656   }
1657 
  // In-place variant: decode Field from reg into reg itself.
  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
1662 
1663   // ---- SMI and Number Utilities ----
1664 
1665   inline void SmiTag(Register dst, Register src);
1666   inline void SmiTag(Register smi);
1667 
1668   inline void JumpIfNotSmi(Register value, Label* not_smi_label);
1669   inline void JumpIfBothSmi(Register value1, Register value2,
1670                             Label* both_smi_label,
1671                             Label* not_smi_label = nullptr);
1672   inline void JumpIfEitherSmi(Register value1, Register value2,
1673                               Label* either_smi_label,
1674                               Label* not_smi_label = nullptr);
1675   inline void JumpIfEitherNotSmi(Register value1,
1676                                  Register value2,
1677                                  Label* not_smi_label);
1678   inline void JumpIfBothNotSmi(Register value1,
1679                                Register value2,
1680                                Label* not_smi_label);
1681 
1682   // Abort execution if argument is a smi, enabled via --debug-code.
1683   void AssertNotSmi(Register object,
1684                     AbortReason reason = AbortReason::kOperandIsASmi);
1685 
1686   inline void ObjectTag(Register tagged_obj, Register obj);
1687   inline void ObjectUntag(Register untagged_obj, Register obj);
1688 
1689   // Abort execution if argument is not a Constructor, enabled via --debug-code.
1690   void AssertConstructor(Register object);
1691 
1692   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1693   void AssertFunction(Register object);
1694 
1695   // Abort execution if argument is not a JSGeneratorObject (or subclass),
1696   // enabled via --debug-code.
1697   void AssertGeneratorObject(Register object);
1698 
1699   // Abort execution if argument is not a JSBoundFunction,
1700   // enabled via --debug-code.
1701   void AssertBoundFunction(Register object);
1702 
1703   // Abort execution if argument is not undefined or an AllocationSite, enabled
1704   // via --debug-code.
1705   void AssertUndefinedOrAllocationSite(Register object);
1706 
  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  //
  // Requires a 64-bit destination register; delegates to the width-generic
  // private helper TryRepresentDoubleAsInt.
  void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
                                 VRegister scratch_d,
                                 Label* on_successful_conversion = nullptr,
                                 Label* on_failed_conversion = nullptr) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }
1720 
1721   // ---- Calling / Jumping helpers ----
1722 
1723   void CallStub(CodeStub* stub);
1724   void TailCallStub(CodeStub* stub);
1725 
1726   void CallRuntime(const Runtime::Function* f,
1727                    int num_arguments,
1728                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1729 
1730   // Convenience function: Same as above, but takes the fid instead.
1731   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1732                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1733     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1734   }
1735 
1736   // Convenience function: Same as above, but takes the fid instead.
1737   void CallRuntime(Runtime::FunctionId fid,
1738                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1739     const Runtime::Function* function = Runtime::FunctionForId(fid);
1740     CallRuntime(function, function->nargs, save_doubles);
1741   }
1742 
1743   void TailCallRuntime(Runtime::FunctionId fid);
1744 
1745   // Jump to a runtime routine.
1746   void JumpToExternalReference(const ExternalReference& builtin,
1747                                bool builtin_exit_frame = false);
1748 
1749   // Generates a trampoline to jump to the off-heap instruction stream.
1750   void JumpToInstructionStream(Address entry);
1751 
1752   // Registers used through the invocation chain are hard-coded.
1753   // We force passing the parameters to ensure the contracts are correctly
1754   // honoured by the caller.
1755   // 'function' must be x1.
1756   // 'actual' must use an immediate or x0.
1757   // 'expected' must use an immediate or x2.
1758   // 'call_kind' must be x5.
1759   void InvokePrologue(const ParameterCount& expected,
1760                       const ParameterCount& actual, Label* done,
1761                       InvokeFlag flag, bool* definitely_mismatches);
1762 
1763   // On function call, call into the debugger if necessary.
1764   void CheckDebugHook(Register fun, Register new_target,
1765                       const ParameterCount& expected,
1766                       const ParameterCount& actual);
1767   void InvokeFunctionCode(Register function, Register new_target,
1768                           const ParameterCount& expected,
1769                           const ParameterCount& actual, InvokeFlag flag);
1770   // Invoke the JavaScript function in the given register.
1771   // Changes the current context to the context in the function before invoking.
1772   void InvokeFunction(Register function, Register new_target,
1773                       const ParameterCount& actual, InvokeFlag flag);
1774   void InvokeFunction(Register function, const ParameterCount& expected,
1775                       const ParameterCount& actual, InvokeFlag flag);
1776 
1777   // ---- Code generation helpers ----
1778 
1779   // Frame restart support
1780   void MaybeDropFrames();
1781 
1782   // ---------------------------------------------------------------------------
1783   // Support functions.
1784 
1785   // Compare object type for heap object.  heap_object contains a non-Smi
1786   // whose object type should be compared with the given type.  This both
1787   // sets the flags and leaves the object type in the type_reg register.
1788   // It leaves the map in the map register (unless the type_reg and map register
1789   // are the same register).  It leaves the heap object in the heap_object
1790   // register unless the heap_object register is the same register as one of the
1791   // other registers.
1792   void CompareObjectType(Register heap_object,
1793                          Register map,
1794                          Register type_reg,
1795                          InstanceType type);
1796 
1797 
1798   // Compare object type for heap object, and branch if equal (or not.)
1799   // heap_object contains a non-Smi whose object type should be compared with
1800   // the given type.  This both sets the flags and leaves the object type in
1801   // the type_reg register. It leaves the map in the map register (unless the
1802   // type_reg and map register are the same register).  It leaves the heap
1803   // object in the heap_object register unless the heap_object register is the
1804   // same register as one of the other registers.
1805   void JumpIfObjectType(Register object,
1806                         Register map,
1807                         Register type_reg,
1808                         InstanceType type,
1809                         Label* if_cond_pass,
1810                         Condition cond = eq);
1811 
1812   // Compare instance type in a map.  map contains a valid map object whose
1813   // object type should be compared with the given type.  This both
1814   // sets the flags and leaves the object type in the type_reg register.
1815   void CompareInstanceType(Register map,
1816                            Register type_reg,
1817                            InstanceType type);
1818 
1819   // Load the elements kind field from a map, and return it in the result
1820   // register.
1821   void LoadElementsKindFromMap(Register result, Register map);
1822 
1823   // Compare the object in a register to a value from the root list.
1824   void CompareRoot(const Register& obj, Heap::RootListIndex index);
1825 
1826   // Compare the object in a register to a value and jump if they are equal.
1827   void JumpIfRoot(const Register& obj,
1828                   Heap::RootListIndex index,
1829                   Label* if_equal);
1830 
1831   // Compare the object in a register to a value and jump if they are not equal.
1832   void JumpIfNotRoot(const Register& obj,
1833                      Heap::RootListIndex index,
1834                      Label* if_not_equal);
1835 
1836   // Compare the contents of a register with an operand, and branch to true,
1837   // false or fall through, depending on condition.
1838   void CompareAndSplit(const Register& lhs,
1839                        const Operand& rhs,
1840                        Condition cond,
1841                        Label* if_true,
1842                        Label* if_false,
1843                        Label* fall_through);
1844 
1845   // Test the bits of register defined by bit_pattern, and branch to
1846   // if_any_set, if_all_clear or fall_through accordingly.
1847   void TestAndSplit(const Register& reg,
1848                     uint64_t bit_pattern,
1849                     Label* if_all_clear,
1850                     Label* if_any_set,
1851                     Label* fall_through);
1852 
1853   // ---------------------------------------------------------------------------
1854   // Frames.
1855 
1856   void ExitFramePreserveFPRegs();
1857   void ExitFrameRestoreFPRegs();
1858 
1859   // Enter exit frame. Exit frames are used when calling C code from generated
1860   // (JavaScript) code.
1861   //
1862   // The only registers modified by this function are the provided scratch
1863   // register, the frame pointer and the stack pointer.
1864   //
1865   // The 'extra_space' argument can be used to allocate some space in the exit
1866   // frame that will be ignored by the GC. This space will be reserved in the
1867   // bottom of the frame immediately above the return address slot.
1868   //
1869   // Set up a stack frame and registers as follows:
1870   //         fp[8]: CallerPC (lr)
1871   //   fp -> fp[0]: CallerFP (old fp)
1872   //         fp[-8]: SPOffset (new sp)
1873   //         fp[-16]: CodeObject()
1874   //         fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
1875   //         sp[8]: Memory reserved for the caller if extra_space != 0.
1876   //                 Alignment padding, if necessary.
1877   //   sp -> sp[0]: Space reserved for the return address.
1878   //
1879   // This function also stores the new frame information in the top frame, so
1880   // that the new frame becomes the current frame.
1881   void EnterExitFrame(bool save_doubles, const Register& scratch,
1882                       int extra_space = 0,
1883                       StackFrame::Type frame_type = StackFrame::EXIT);
1884 
1885   // Leave the current exit frame, after a C function has returned to generated
1886   // (JavaScript) code.
1887   //
1888   // This effectively unwinds the operation of EnterExitFrame:
1889   //  * Preserved doubles are restored (if restore_doubles is true).
1890   //  * The frame information is removed from the top frame.
1891   //  * The exit frame is dropped.
1892   void LeaveExitFrame(bool save_doubles, const Register& scratch,
1893                       const Register& scratch2);
1894 
  // Load the global proxy from the current native context into dst
  // (slot Context::GLOBAL_PROXY_INDEX).
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }
1899 
1900   // ---------------------------------------------------------------------------
1901   // In-place weak references.
1902   void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1903 
1904   // ---------------------------------------------------------------------------
1905   // StatsCounter support
1906 
1907   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1908                         Register scratch2);
1909   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1910                         Register scratch2);
1911 
1912   // ---------------------------------------------------------------------------
1913   // Garbage collector support (GC).
1914 
1915   // Push and pop the registers that can hold pointers, as defined by the
1916   // RegList constant kSafepointSavedRegisters.
1917   void PushSafepointRegisters();
1918   void PopSafepointRegisters();
1919 
1920   void CheckPageFlag(const Register& object, const Register& scratch, int mask,
1921                      Condition cc, Label* condition_met);
1922 
1923   // Notify the garbage collector that we wrote a pointer into an object.
1924   // |object| is the object being stored into, |value| is the object being
1925   // stored.  value and scratch registers are clobbered by the operation.
1926   // The offset is the offset from the start of the object, not the offset from
1927   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
1928   void RecordWriteField(
1929       Register object, int offset, Register value, Register scratch,
1930       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1931       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1932       SmiCheck smi_check = INLINE_SMI_CHECK);
1933 
1934   // For a given |object| notify the garbage collector that the slot |address|
1935   // has been written.  |value| is the object being stored. The value and
1936   // address registers are clobbered by the operation.
1937   void RecordWrite(
1938       Register object, Register address, Register value,
1939       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1940       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1941       SmiCheck smi_check = INLINE_SMI_CHECK);
1942 
1943   // ---------------------------------------------------------------------------
1944   // Debugging.
1945 
1946   void AssertRegisterIsRoot(
1947       Register reg, Heap::RootListIndex index,
1948       AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
1949 
1950   // Abort if the specified register contains the invalid color bit pattern.
1951   // The pattern must be in bits [1:0] of 'reg' register.
1952   //
1953   // If emit_debug_code() is false, this emits no code.
1954   void AssertHasValidColor(const Register& reg);
1955 
1956   void LoadNativeContextSlot(int index, Register dst);
1957 
  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  //
  // At most four arguments can be passed; unused slots default to NoCPUReg.
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);
1978 
  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  //
  // At most four arguments can be passed; unused slots default to NoCPUReg.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);
1987 
 private:
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  // Branches to 'branch' when the new-space membership test on 'object'
  // matches 'cond'.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);
1993 
  // Try to represent a double as an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // On output the Z flag is set if the operation was successful.
  //
  // If provided, 'on_successful_conversion' / 'on_failed_conversion' are
  // branched to according to the outcome; either label may be null.
  void TryRepresentDoubleAsInt(Register as_int, VRegister value,
                               VRegister scratch_d,
                               Label* on_successful_conversion = nullptr,
                               Label* on_failed_conversion = nullptr);
2007 
 public:
  // Far branches resolving.
  //
  // The various classes of branch instructions with immediate offsets have
  // different ranges. While the Assembler will fail to assemble a branch
  // exceeding its range, the MacroAssembler offers a mechanism to resolve
  // branches to too distant targets, either by tweaking the generated code to
  // use branch instructions with wider ranges or generating veneers.
  //
  // Currently branches to distant targets are resolved using unconditional
  // branch instructions with a range of +-128MB. If that becomes too little
  // (!), the mechanism can be extended to generate special veneers for really
  // far targets.
};
2022 
2023 
2024 // Use this scope when you need a one-to-one mapping between methods and
2025 // instructions. This scope prevents the MacroAssembler from being called and
2026 // literal pools from being emitted. It also asserts the number of instructions
2027 // emitted is what you specified when creating the scope.
2028 class InstructionAccurateScope BASE_EMBEDDED {
2029  public:
2030   explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
tasm_(tasm)2031       : tasm_(tasm)
2032 #ifdef DEBUG
2033         ,
2034         size_(count * kInstrSize)
2035 #endif
2036   {
2037     // Before blocking the const pool, see if it needs to be emitted.
2038     tasm_->CheckConstPool(false, true);
2039     tasm_->CheckVeneerPool(false, true);
2040 
2041     tasm_->StartBlockPools();
2042 #ifdef DEBUG
2043     if (count != 0) {
2044       tasm_->bind(&start_);
2045     }
2046     previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
2047     tasm_->set_allow_macro_instructions(false);
2048 #endif
2049   }
2050 
~InstructionAccurateScope()2051   ~InstructionAccurateScope() {
2052     tasm_->EndBlockPools();
2053 #ifdef DEBUG
2054     if (start_.is_bound()) {
2055       DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
2056     }
2057     tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2058 #endif
2059   }
2060 
2061  private:
2062   TurboAssembler* tasm_;
2063 #ifdef DEBUG
2064   size_t size_;
2065   Label start_;
2066   bool previous_allow_macro_instructions_;
2067 #endif
2068 };
2069 
// This scope utility allows scratch registers to be managed safely. The
// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested but the destructors need to run in the opposite
// order as the constructors. We do not have assertions for this.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(TurboAssembler* tasm)
      : available_(tasm->TmpList()),
        availablefp_(tasm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK_EQ(available_->type(), CPURegister::kRegister);
    DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
  }

  // Restores both lists to their state at scope entry (defined out of line).
  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
  // Acquire a scratch V register with an explicit vector format.
  VRegister AcquireV(VectorFormat format) {
    return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
  }

  // Acquire a scratch register of the same size (W/X or S/D/Q) as 'reg'.
  Register AcquireSameSizeAs(const Register& reg);
  VRegister AcquireSameSizeAs(const VRegister& reg);

 private:
  // Take the next register from 'available'. NOTE(review): behavior on an
  // empty list is not visible here — confirm in the definition.
  static CPURegister AcquireNextAvailable(CPURegList* available);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kVRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kVRegister
};
2117 
// Operand addressing slot 'index' of the context object held in 'context'.
// NOTE(review): semantics inferred from the name — confirm in the definition.
MemOperand ContextMemOperand(Register context, int index = 0);
// Operand addressing the native context slot of the current context.
MemOperand NativeContextMemOperand();
2120 
// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  // Decode the patch information found at 'info'.
  explicit InlineSmiCheckInfo(Address info);

  // Whether the decoded data describes an inline SMI check.
  bool HasSmiCheck() const { return smi_check_ != nullptr; }

  // The register tested by the SMI check (NoReg when there is no inline
  // check — see Emit below).
  const Register& SmiRegister() const {
    return reg_;
  }

  // The instruction implementing the SMI check (nullptr if none).
  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Offset between the emitted data and the SMI check, as encoded by Emit.
  int SmiCheckDelta() const { return smi_check_delta_; }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  int smi_check_delta_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.

  // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
  // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};
2169 
2170 }  // namespace internal
2171 }  // namespace v8
2172 
2173 #define ACCESS_MASM(masm) masm->
2174 
2175 #endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
2176