// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"

// Simulator specific helpers.
#if USE_SIMULATOR
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V) \
  V(Ldrb, Register&, rt, LDRB_w) \
  V(Strb, Register&, rt, STRB_w) \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w) \
  V(Strh, Register&, rt, STRH_w) \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
  V(Str, CPURegister&, rt, StoreOpFor(rt)) \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V) \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb) \
  V(Ldarh, ldarh) \
  V(Ldar, ldar) \
  V(Ldaxrb, ldaxrb) \
  V(Ldaxrh, ldaxrh) \
  V(Ldaxr, ldaxr) \
  V(Stlrb, stlrb) \
  V(Stlrh, stlrh) \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb) \
  V(Stlxrh, stlxrh) \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
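
// Illustrative only (not part of this header): with the TurboAssembler's
// B(Label*, BranchType, Register, int) overload declared below, and assuming
// the usual '__' masm macro, the register-based branch types map onto
// cbz/cbnz:
//   __ B(&on_zero, reg_zero, x0);                       // cbz  x0, on_zero
//   __ B(&on_nonzero, InvertBranchType(reg_zero), x0);  // cbnz x0, on_nonzero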

enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//   Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }

  static bool IsNearCallOffset(int64_t offset);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  inline void InitializeRootRegister();

  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const Register& rd, Smi smi);
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }

  // These are required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);
  void Move(Register dst, MemOperand src);
  void Move(Register dst, Register src);

  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register lhs, Register rhs);
  void Swap(VRegister lhs, VRegister rhs);

  // NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmla, Fmla) \
  V(fmls, Fmls) \
  V(fmul, Fmul) \
  V(fmulx, Fmulx) \
  V(mul, Mul) \
  V(mla, Mla) \
  V(mls, Mls) \
  V(sqdmulh, Sqdmulh) \
  V(sqrdmulh, Sqrdmulh) \
  V(sqdmull, Sqdmull) \
  V(sqdmull2, Sqdmull2) \
  V(sqdmlal, Sqdmlal) \
  V(sqdmlal2, Sqdmlal2) \
  V(sqdmlsl, Sqdmlsl) \
  V(sqdmlsl2, Sqdmlsl2) \
  V(smull, Smull) \
  V(smull2, Smull2) \
  V(smlal, Smlal) \
  V(smlal2, Smlal2) \
  V(smlsl, Smlsl) \
  V(smlsl2, Smlsl2) \
  V(umull, Umull) \
  V(umull2, Umull2) \
  V(umlal, Umlal) \
  V(umlal2, Umlal2) \
  V(umlsl, Umlsl) \
  V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
            int vm_index) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, vm, vm_index); \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  // NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs) \
  V(addp, Addp) \
  V(addv, Addv) \
  V(cls, Cls) \
  V(clz, Clz) \
  V(cnt, Cnt) \
  V(faddp, Faddp) \
  V(fcvtas, Fcvtas) \
  V(fcvtau, Fcvtau) \
  V(fcvtl, Fcvtl) \
  V(fcvtms, Fcvtms) \
  V(fcvtmu, Fcvtmu) \
  V(fcvtn, Fcvtn) \
  V(fcvtns, Fcvtns) \
  V(fcvtnu, Fcvtnu) \
  V(fcvtps, Fcvtps) \
  V(fcvtpu, Fcvtpu) \
  V(fmaxnmp, Fmaxnmp) \
  V(fmaxnmv, Fmaxnmv) \
  V(fmaxp, Fmaxp) \
  V(fmaxv, Fmaxv) \
  V(fminnmp, Fminnmp) \
  V(fminnmv, Fminnmv) \
  V(fminp, Fminp) \
  V(fminv, Fminv) \
  V(fneg, Fneg) \
  V(frecpe, Frecpe) \
  V(frecpx, Frecpx) \
  V(frinta, Frinta) \
  V(frinti, Frinti) \
  V(frintm, Frintm) \
  V(frintn, Frintn) \
  V(frintp, Frintp) \
  V(frintx, Frintx) \
  V(frintz, Frintz) \
  V(frsqrte, Frsqrte) \
  V(fsqrt, Fsqrt) \
  V(mov, Mov) \
  V(mvn, Mvn) \
  V(neg, Neg) \
  V(not_, Not) \
  V(rbit, Rbit) \
  V(rev16, Rev16) \
  V(rev32, Rev32) \
  V(rev64, Rev64) \
  V(sadalp, Sadalp) \
  V(saddlp, Saddlp) \
  V(saddlv, Saddlv) \
  V(smaxv, Smaxv) \
  V(sminv, Sminv) \
  V(sqabs, Sqabs) \
  V(sqneg, Sqneg) \
  V(sqxtn2, Sqxtn2) \
  V(sqxtn, Sqxtn) \
  V(sqxtun2, Sqxtun2) \
  V(sqxtun, Sqxtun) \
  V(suqadd, Suqadd) \
  V(sxtl2, Sxtl2) \
  V(sxtl, Sxtl) \
  V(uadalp, Uadalp) \
  V(uaddlp, Uaddlp) \
  V(uaddlv, Uaddlv) \
  V(umaxv, Umaxv) \
  V(uminv, Uminv) \
  V(uqxtn2, Uqxtn2) \
  V(uqxtn, Uqxtn) \
  V(urecpe, Urecpe) \
  V(ursqrte, Ursqrte) \
  V(usqadd, Usqadd) \
  V(uxtl2, Uxtl2) \
  V(uxtl, Uxtl) \
  V(xtn2, Xtn2) \
  V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn); \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

  // NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq) \
  V(fcmge, Fcmge) \
  V(fcmgt, Fcmgt) \
  V(fcmle, Fcmle) \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, imm); \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  // NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add) \
  V(addhn2, Addhn2) \
  V(addhn, Addhn) \
  V(addp, Addp) \
  V(and_, And) \
  V(bic, Bic) \
  V(bif, Bif) \
  V(bit, Bit) \
  V(bsl, Bsl) \
  V(cmeq, Cmeq) \
  V(cmge, Cmge) \
  V(cmgt, Cmgt) \
  V(cmhi, Cmhi) \
  V(cmhs, Cmhs) \
  V(cmtst, Cmtst) \
  V(eor, Eor) \
  V(fabd, Fabd) \
  V(facge, Facge) \
  V(facgt, Facgt) \
  V(faddp, Faddp) \
  V(fcmeq, Fcmeq) \
  V(fcmge, Fcmge) \
  V(fcmgt, Fcmgt) \
  V(fmaxnmp, Fmaxnmp) \
  V(fmaxp, Fmaxp) \
  V(fminnmp, Fminnmp) \
  V(fminp, Fminp) \
  V(fmla, Fmla) \
  V(fmls, Fmls) \
  V(fmulx, Fmulx) \
  V(fnmul, Fnmul) \
  V(frecps, Frecps) \
  V(frsqrts, Frsqrts) \
  V(mla, Mla) \
  V(mls, Mls) \
  V(mul, Mul) \
  V(orn, Orn) \
  V(orr, Orr) \
  V(pmull2, Pmull2) \
  V(pmull, Pmull) \
  V(pmul, Pmul) \
  V(raddhn2, Raddhn2) \
  V(raddhn, Raddhn) \
  V(rsubhn2, Rsubhn2) \
  V(rsubhn, Rsubhn) \
  V(sabal2, Sabal2) \
  V(sabal, Sabal) \
  V(saba, Saba) \
  V(sabdl2, Sabdl2) \
  V(sabdl, Sabdl) \
  V(sabd, Sabd) \
  V(saddl2, Saddl2) \
  V(saddl, Saddl) \
  V(saddw2, Saddw2) \
  V(saddw, Saddw) \
  V(shadd, Shadd) \
  V(shsub, Shsub) \
  V(smaxp, Smaxp) \
  V(smax, Smax) \
  V(sminp, Sminp) \
  V(smin, Smin) \
  V(smlal2, Smlal2) \
  V(smlal, Smlal) \
  V(smlsl2, Smlsl2) \
  V(smlsl, Smlsl) \
  V(smull2, Smull2) \
  V(smull, Smull) \
  V(sqadd, Sqadd) \
  V(sqdmlal2, Sqdmlal2) \
  V(sqdmlal, Sqdmlal) \
  V(sqdmlsl2, Sqdmlsl2) \
  V(sqdmlsl, Sqdmlsl) \
  V(sqdmulh, Sqdmulh) \
  V(sqdmull2, Sqdmull2) \
  V(sqdmull, Sqdmull) \
  V(sqrdmulh, Sqrdmulh) \
  V(sqrshl, Sqrshl) \
  V(sqshl, Sqshl) \
  V(sqsub, Sqsub) \
  V(srhadd, Srhadd) \
  V(srshl, Srshl) \
  V(sshl, Sshl) \
  V(ssubl2, Ssubl2) \
  V(ssubl, Ssubl) \
  V(ssubw2, Ssubw2) \
  V(ssubw, Ssubw) \
  V(subhn2, Subhn2) \
  V(subhn, Subhn) \
  V(sub, Sub) \
  V(trn1, Trn1) \
  V(trn2, Trn2) \
  V(uabal2, Uabal2) \
  V(uabal, Uabal) \
  V(uaba, Uaba) \
  V(uabdl2, Uabdl2) \
  V(uabdl, Uabdl) \
  V(uabd, Uabd) \
  V(uaddl2, Uaddl2) \
  V(uaddl, Uaddl) \
  V(uaddw2, Uaddw2) \
  V(uaddw, Uaddw) \
  V(uhadd, Uhadd) \
  V(uhsub, Uhsub) \
  V(umaxp, Umaxp) \
  V(umax, Umax) \
  V(uminp, Uminp) \
  V(umin, Umin) \
  V(umlal2, Umlal2) \
  V(umlal, Umlal) \
  V(umlsl2, Umlsl2) \
  V(umlsl, Umlsl) \
  V(umull2, Umull2) \
  V(umull, Umull) \
  V(uqadd, Uqadd) \
  V(uqrshl, Uqrshl) \
  V(uqshl, Uqshl) \
  V(uqsub, Uqsub) \
  V(urhadd, Urhadd) \
  V(urshl, Urshl) \
  V(ushl, Ushl) \
  V(usubl2, Usubl2) \
  V(usubl, Usubl) \
  V(usubw2, Usubw2) \
  V(usubw, Usubw) \
  V(uzp1, Uzp1) \
  V(uzp2, Uzp2) \
  V(zip1, Zip1) \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, vm); \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L);

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);

  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);

  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);

  void Pacibsp() {
    DCHECK(allow_macro_instructions_);
    pacibsp();
  }
  void Autibsp() {
    DCHECK(allow_macro_instructions_);
    autibsp();
  }

  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //   UseScratchRegisterScope temps(&masm);
  //   Register temp = temps.AcquireX();  // temp will be x16
  //   __ Mov(x17, ptr);
  //   __ Mov(x16, modifier);  // Will override temp!
  //   __ Pacib1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //   UseScratchRegisterScope temps(&masm);
  //   temps.Exclude(x16, x17);
  //   temps.Include(x10, x11);
  //   __ Mov(x17, ptr);
  //   __ Mov(x16, modifier);
  //   __ Pacib1716();
  void Pacib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    pacib1716();
  }
  void Autib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    autib1716();
  }

  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Isb();
  inline void Csdb();

  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register dst, const MemOperand& src);
  inline void SmiUntag(Register smi);

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);

  inline void SmiToInt32(Register smi);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(AbortReason reason);

  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

  void Trap();
  void DebugBreak();

  // Print a message to stderr and abort execution.
  void Abort(AbortReason reason);

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char* format, CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);
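
  // Illustrative call (not from this header; registers are arbitrary, and the
  // usual '__' masm macro is assumed):
  //   __ Printf("count: %d, value: %f\n", w0, d1);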

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false, otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  inline void Bind(Label* label,
                   BranchTargetIdentifier id = BranchTargetIdentifier::kNone);

  // Control-flow integrity:

  // Define a function entrypoint.
  inline void CodeEntry();
  // Define an exception handler.
  inline void ExceptionHandler();
  // Define an exception handler and bind a label.
  inline void BindExceptionHandler(Label* label);

  // Control-flow integrity:

  // Define a jump (BR) target.
  inline void JumpTarget();
  // Define a jump (BR) target and bind a label.
  inline void BindJumpTarget(Label* label);
  // Define a call (BLR) target. The target also allows tail calls (via BR)
  // when the target is x16 or x17.
  inline void CallTarget();
  // Define a jump/call target.
  inline void JumpOrCallTarget();
  // Define a jump/call target and bind a label.
  inline void BindJumpOrCallTarget(Label* label);

  static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Move macros.
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);

  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Blr(const Register& xn);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void CmpTagged(const Register& rn, const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);

  // Emits a runtime assert that the stack pointer is aligned.
  void AssertSpAligned();

  // Copy slot_count stack slots from the stack offset specified by src to
  // the stack offset specified by dst. The offsets and count are expressed in
  // slot-sized units. Offset dst must be less than src, or the gap between
  // them must be greater than or equal to slot_count, otherwise the result is
  // unpredictable. The function may corrupt its register arguments. The
  // registers must not alias each other.
  void CopySlots(int dst, Register src, Register slot_count);
  void CopySlots(Register dst, Register src, Register slot_count);
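
  // For example (a sketch, not part of the interface; values are arbitrary):
  //   __ Mov(x2, 5);            // source offset, in slots
  //   __ Mov(x3, 3);            // number of slots to copy
  //   __ CopySlots(0, x2, x3);  // copies slots [5, 8) down to slots [0, 3)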

  // Copy count double words from the address in register src to the address
  // in register dst. There are three modes for this function:
  // 1) Address dst must be less than src, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. This is the default mode.
  // 2) Address src must be less than dst, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. In this mode, src and dst specify the last (highest)
  //    address of the regions to copy from and to.
  // 3) The same as mode 1, but the words are copied in the reversed order.
  // The case where src == dst is not supported.
  // The function may corrupt its register arguments. The registers must not
  // alias each other.
  enum CopyDoubleWordsMode {
    kDstLessThanSrc,
    kSrcLessThanDst,
    kDstLessThanSrcAndReverse
  };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
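
  // Sketch of a forward copy between two stack regions (illustrative only;
  // register choices are arbitrary):
  //   __ SlotAddress(x1, 0);           // destination address
  //   __ SlotAddress(x2, 16);          // source address, 16 slots higher
  //   __ Mov(x3, 8);                   // number of double words to copy
  //   __ CopyDoubleWords(x1, x2, x3);  // default kDstLessThanSrc mode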

  // Calculate the address of a double word-sized slot at slot_offset from the
  // stack pointer, and write it to dst. Positive slot_offsets are at addresses
  // greater than sp, with slot zero at sp.
  void SlotAddress(Register dst, int slot_offset);
  void SlotAddress(Register dst, Register slot_offset);

  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Operand& imm);

  // Claim or drop stack space.
  //
  // On Windows, Claim will write a value every 4k, as is required by the stack
  // expansion mechanism.
  //
  // The stack pointer must be aligned to 16 bytes and the size claimed or
  // dropped must be a multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
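
  // For instance, reserving four 8-byte slots and releasing them again might
  // look like this (illustrative only):
  //   __ Claim(4);  // sp -= 4 * kXRegSize
  //   ...
  //   __ Drop(4);   // sp += 4 * kXRegSize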

  // Drop 'count' arguments from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  // We assume the size of the arguments is the pointer size.
  // An optional mode argument is passed, which can indicate we need to
  // explicitly add the receiver to the count.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  inline void DropArguments(const Register& count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
  inline void DropArguments(int64_t count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);

  // Drop 'count' slots from stack, rounded up to a multiple of two, without
  // actually accessing memory.
  inline void DropSlots(int64_t count);

  // Push a single argument, with padding, to the stack.
  inline void PushArgument(const Register& arg);

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // Other than the registers passed into Pop, the stack pointer, (possibly)
  // the system stack pointer and (possibly) the link register, these methods
  // do not modify any other registers.
  //
  // Some of the methods take an optional LoadLRMode or StoreLRMode template
  // argument, which specifies whether we need to sign the link register at the
  // start of the operation, or authenticate it at the end of the operation,
  // when control flow integrity measures are enabled.
  // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
  // argument to the operation.
  enum LoadLRMode { kAuthLR, kDontLoadLR };
  enum StoreLRMode { kSignLR, kDontStoreLR };
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const Register& src0, const VRegister& src1);
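
  // Typical usage (a sketch only, assuming the usual '__' masm macro):
  //   __ Push(x0, x1);           // same as Push(x0); Push(x1);
  //   __ Pop(x1, x0);            // restores both registers
  //   __ Push<kSignLR>(lr, fp);  // signs lr first when CFI is enabled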

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Operand offset,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  // - Move |object| to |dst_object|.
  // - Compute the address of the slot pointed to by |offset| in |object| and
  //   write it to |dst_slot|.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);
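
  // A possible use, saving and restoring a group of callee-saved registers
  // (a sketch; the exact CPURegList construction here is illustrative):
  //   CPURegList saved(x19, x20, x21, x22);
  //   __ PushCPURegList(saved);
  //   ...
  //   __ PopCPURegList(saved);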

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);
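
  // For instance, a caller might materialise a large immediate and let the
  // consuming instruction undo the pre-shift (illustrative sketch only;
  // 'temp', 'dst' and 'src' are hypothetical registers):
  //   Operand imm_op = MoveImmediateForShiftedOp(temp, 0x1f7de, kAnyShift);
  //   Add(dst, src, imm_op);  // add dst, src, temp, lsl #1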

  void CheckPageFlag(const Register& object, int mask, Condition cc,
                     Label* condition_met);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                               Condition cond, Label* label);
  inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
                                     Condition cond, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);
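
  // For example (an illustrative sketch; kSmiTagMask is V8's smi tag mask):
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
  //   __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);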

  inline void Brk(int code);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void LoadMap(Register dst, Register object);

  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, VRegister fn);

  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  // Load the builtin given by the Smi in |builtin| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
  void CallBuiltinByIndex(Register builtin);
  void CallBuiltin(Builtin builtin);
  void TailCallBuiltin(Builtin builtin);

  void LoadCodeObjectEntry(Register destination, Register code_object);
  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Load code entry point from the CodeDataContainer object.
  void LoadCodeDataContainerEntry(Register destination,
                                  Register code_data_container_object);
  // Load code entry point from the CodeDataContainer object and compute
  // Code object pointer out of it. Must not be used for CodeDataContainers
  // corresponding to builtins, because their entry points values point to
  // the embedded instruction stream in .text section.
  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
                                           Register code_data_container_object);
  void CallCodeDataContainerObject(Register code_data_container_object);
  void JumpCodeDataContainerObject(Register code_data_container_object,
                                   JumpMode jump_mode = JumpMode::kJump);

  // Helper functions that dispatch either to Call/JumpCodeObject or to
  // Call/JumpCodeDataContainerObject.
  // TODO(v8:11880): remove since CodeT targets are now default.
  void LoadCodeTEntry(Register destination, Register code);
  void CallCodeTObject(Register code);
  void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode,
                         LinkRegisterStatus lr_status);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  void Fjcvtzs(const Register& rd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    DCHECK(!rd.IsZero());
    fjcvtzs(rd, vn);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);
  inline void CcmpTagged(const Register& rn, const Operand& operand,
                         StatusFlags nzcv, Condition cond);

  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  // When the optional template argument is kSignLR and control flow integrity
  // measures are enabled, we sign the link register before poking it onto the
  // stack. 'src' must be lr in this case.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  // When the optional template argument is kAuthLR and control flow integrity
  // measures are enabled, we authenticate the link register after peeking the
  // value. 'dst' must be lr in this case.
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
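
  // For example, to spill a register at sp + 16 and reload it later (a sketch
  // only):
  //   __ Poke(x0, 16);
  //   ...
  //   __ Peek(x0, 16);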

  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  int ActivationFrameAlignment();

  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn) \
  V(rshrn2, Rshrn2) \
  V(shl, Shl) \
  V(shll, Shll) \
  V(shll2, Shll2) \
  V(shrn, Shrn) \
  V(shrn2, Shrn2) \
  V(sli, Sli) \
  V(sqrshrn, Sqrshrn) \
  V(sqrshrn2, Sqrshrn2) \
  V(sqrshrun, Sqrshrun) \
  V(sqrshrun2, Sqrshrun2) \
  V(sqshl, Sqshl) \
  V(sqshlu, Sqshlu) \
  V(sqshrn, Sqshrn) \
  V(sqshrn2, Sqshrn2) \
  V(sqshrun, Sqshrun) \
  V(sqshrun2, Sqshrun2) \
  V(sri, Sri) \
  V(srshr, Srshr) \
  V(srsra, Srsra) \
  V(sshll, Sshll) \
  V(sshll2, Sshll2) \
  V(sshr, Sshr) \
  V(ssra, Ssra) \
  V(uqrshrn, Uqrshrn) \
  V(uqrshrn2, Uqrshrn2) \
  V(uqshl, Uqshl) \
  V(uqshrn, Uqshrn) \
  V(uqshrn2, Uqshrn2) \
  V(urshr, Urshr) \
  V(ursra, Ursra) \
  V(ushll, Ushll) \
  V(ushll2, Ushll2) \
  V(ushr, Ushr) \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, shift); \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }

  // Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) final;
  void PushRoot(RootIndex index);

  inline void Ret(const Register& xn = lr);

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);
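
  // A sketch of the expected pattern (labels and registers are illustrative):
  //   Label done;
  //   __ TryConvertDoubleToInt64(x0, d0, &done);
  //   // Fall-through: out of range; the sign of x0 gives the overflow
  //   // direction.
  //   __ Bind(&done);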

  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);

  // Prologue claims an extra slot due to arm64's alignment constraints.
  static constexpr int kExtraSlotClaimedByPrologue = 1;
  // Generates function prologue code.
  void Prologue();

  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }

  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);
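
  // For example (labels are illustrative):
  //   __ Abs(x0, x1, &not_representable);  // taken only if x1 == INT64_MIN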
1357
1358 inline void Cls(const Register& rd, const Register& rn);
1359 inline void Cneg(const Register& rd, const Register& rn, Condition cond);
1360 inline void Rev16(const Register& rd, const Register& rn);
1361 inline void Rev32(const Register& rd, const Register& rn);
1362 inline void Fcvtns(const Register& rd, const VRegister& fn);
1363 inline void Fcvtnu(const Register& rd, const VRegister& fn);
1364 inline void Fcvtms(const Register& rd, const VRegister& fn);
1365 inline void Fcvtmu(const Register& rd, const VRegister& fn);
1366 inline void Fcvtas(const Register& rd, const VRegister& fn);
1367 inline void Fcvtau(const Register& rd, const VRegister& fn);
1368
1369 // Compute the start of the generated instruction stream from the current PC.
1370 // This is an alternative to embedding the {CodeObject} handle as a reference.
1371 void ComputeCodeStartAddress(const Register& rd);
1372
1373 // ---------------------------------------------------------------------------
1374 // Pointer compression Support
1375
1376 // Loads a field containing a HeapObject and decompresses it if pointer
1377 // compression is enabled.
1378 void LoadTaggedPointerField(const Register& destination,
1379 const MemOperand& field_operand);
1380
1381 // Loads a field containing any tagged value and decompresses it if necessary.
1382 void LoadAnyTaggedField(const Register& destination,
1383 const MemOperand& field_operand);
1384
1385 // Loads a field containing a tagged signed value and decompresses it if
1386 // necessary.
1387 void LoadTaggedSignedField(const Register& destination,
1388 const MemOperand& field_operand);
1389
1390 // Loads a field containing smi value and untags it.
1391 void SmiUntagField(Register dst, const MemOperand& src);
1392
1393 // Compresses and stores tagged value to given on-heap location.
1394 void StoreTaggedField(const Register& value,
1395 const MemOperand& dst_field_operand);
1396
1397 void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
1398 const Register& dst_index, const Register& temp);
1399
1400 void DecompressTaggedSigned(const Register& destination,
1401 const MemOperand& field_operand);
1402 void DecompressTaggedPointer(const Register& destination,
1403 const MemOperand& field_operand);
1404 void DecompressTaggedPointer(const Register& destination,
1405 const Register& source);
1406 void DecompressAnyTagged(const Register& destination,
1407 const MemOperand& field_operand);
1408
1409 void AtomicDecompressTaggedSigned(const Register& destination,
1410 const Register& base, const Register& index,
1411 const Register& temp);
1412 void AtomicDecompressTaggedPointer(const Register& destination,
1413 const Register& base,
1414 const Register& index,
1415 const Register& temp);
1416 void AtomicDecompressAnyTagged(const Register& destination,
1417 const Register& base, const Register& index,
1418 const Register& temp);
1419
1420 // Restore FP and LR from the values stored in the current frame. This will
1421 // authenticate the LR when pointer authentication is enabled.
1422 void RestoreFPAndLR();
1423
1424 #if V8_ENABLE_WEBASSEMBLY
1425 void StoreReturnAddressInWasmExitFrame(Label* return_location);
1426 #endif // V8_ENABLE_WEBASSEMBLY
1427
1428 // Wasm helpers. These instructions don't have direct lowering
1429 // to native instructions. These helpers allow us to define the optimal code
1430 // sequence, and be used in both TurboFan and Liftoff.
1431 void PopcntHelper(Register dst, Register src);
1432 void I64x2BitMask(Register dst, VRegister src);
1433 void I64x2AllTrue(Register dst, VRegister src);
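  // To illustrate why a shared helper is worthwhile: AArch64 has no scalar
  // population-count instruction, so Popcnt is typically routed through the
  // vector unit. One plausible 64-bit sequence (a sketch of the idea, not
  // necessarily the exact code PopcntHelper emits; v16 stands in for a
  // scratch VRegister):
  //
  //   Fmov(v16.D(), src.X());     // move the 64-bit input into the SIMD unit
  //   Cnt(v16.V8B(), v16.V8B());  // per-byte population count
  //   Addv(v16.B(), v16.V8B());   // sum the eight byte counts into lane 0
  //   Fmov(dst.W(), v16.S());     // move the (byte-sized) result back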
1434
1435 // ---------------------------------------------------------------------------
1436 // V8 Sandbox support
1437
1438 // Transform a SandboxedPointer from/to its encoded form, which is used when
1439 // the pointer is stored on the heap and ensures that the pointer will always
1440 // point into the sandbox.
1441 void EncodeSandboxedPointer(const Register& value);
1442 void DecodeSandboxedPointer(const Register& value);
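  // Conceptually the encoded form is an offset from the sandbox base (a
  // sketch, assuming the usual scheme where the offset is stored shifted left
  // by kSandboxedPointerShift; the sandbox constants are authoritative):
  //
  //   encoded     = (raw_pointer - sandbox_base) << kSandboxedPointerShift;
  //   raw_pointer = sandbox_base + (encoded >> kSandboxedPointerShift);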
1443
1444 // Load and decode a SandboxedPointer from the heap.
1445 void LoadSandboxedPointerField(const Register& destination,
1446 const MemOperand& field_operand);
1447 // Encode and store a SandboxedPointer to the heap.
1448 void StoreSandboxedPointerField(const Register& value,
1449 const MemOperand& dst_field_operand);
1450
1451   // Loads a field containing an off-heap pointer and does the necessary
1452   // decoding if sandboxed external pointers are enabled.
1453 void LoadExternalPointerField(Register destination, MemOperand field_operand,
1454 ExternalPointerTag tag,
1455 Register isolate_root = Register::no_reg());
1456
1457 protected:
1458 // The actual Push and Pop implementations. These don't generate any code
1459 // other than that required for the push or pop. This allows
1460 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
1461 // block of registers.
1462 //
1463 // Note that size is per register, and is specified in bytes.
1464 void PushHelper(int count, int size, const CPURegister& src0,
1465 const CPURegister& src1, const CPURegister& src2,
1466 const CPURegister& src3);
1467 void PopHelper(int count, int size, const CPURegister& dst0,
1468 const CPURegister& dst1, const CPURegister& dst2,
1469 const CPURegister& dst3);
1470
1471 void ConditionalCompareMacro(const Register& rn, const Operand& operand,
1472 StatusFlags nzcv, Condition cond,
1473 ConditionalCompareOp op);
1474
1475 void AddSubWithCarryMacro(const Register& rd, const Register& rn,
1476 const Operand& operand, FlagsUpdate S,
1477 AddSubWithCarryOp op);
1478
1479 // Call Printf. On a native build, a simple call will be generated, but if the
1480 // simulator is being used then a suitable pseudo-instruction is used. The
1481 // arguments and stack must be prepared by the caller as for a normal AAPCS64
1482 // call to 'printf'.
1483 //
1484 // The 'args' argument should point to an array of variable arguments in their
1485 // proper PCS registers (and in calling order). The argument registers can
1486 // have mixed types. The format string (x0) should not be included.
1487 void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
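  // A hedged usage sketch (the register assignments are illustrative): with
  // the format string already in x0 and the arguments placed in their AAPCS64
  // registers, the caller describes those argument registers to CallPrintf:
  //
  //   CPURegister args[] = {x1, d0};
  //   CallPrintf(2, args);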
1488
1489 private:
1490 #if DEBUG
1491   // Tell whether any of the macro instructions can be used. When false the
1492 // MacroAssembler will assert if a method which can emit a variable number
1493 // of instructions is called.
1494 bool allow_macro_instructions_ = true;
1495 #endif
1496
1497 // Scratch registers available for use by the MacroAssembler.
1498 CPURegList tmp_list_ = DefaultTmpList();
1499 CPURegList fptmp_list_ = DefaultFPTmpList();
1500
1501 // Helps resolve branching to labels potentially out of range.
1502 // If the label is not bound, it registers the information necessary to later
1503 // be able to emit a veneer for this branch if necessary.
1504 // If the label is bound, it returns true if the label (or the previous link
1505 // in the label chain) is out of range. In that case the caller is responsible
1506 // for generating appropriate code.
1507 // Otherwise it returns false.
1508   // This function also checks whether veneers need to be emitted.
1509 bool NeedExtraInstructionsOrRegisterBranch(Label* label,
1510 ImmBranchType branch_type);
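  // A sketch of the intended calling pattern (illustrative; the conditional
  // branch macros elsewhere in this file are the authoritative users):
  //
  //   Label done;
  //   if (NeedExtraInstructionsOrRegisterBranch(label, CondBranchType)) {
  //     // The target may be out of range: branch over an unconditional B,
  //     // which can reach anywhere.
  //     b(&done, NegateCondition(cond));
  //     B(label);
  //   } else {
  //     b(label, cond);
  //   }
  //   bind(&done);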
1511
1512 void Movi16bitHelper(const VRegister& vd, uint64_t imm);
1513 void Movi32bitHelper(const VRegister& vd, uint64_t imm);
1514 void Movi64bitHelper(const VRegister& vd, uint64_t imm);
1515
1516 void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
1517 LoadStoreOp op);
1518
1519 void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
1520 const MemOperand& addr, LoadStorePairOp op);
1521
1522 int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
1523 byte* pc);
1524
1525 void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
1526 };
1527
1528 class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
1529 public:
1530 using TurboAssembler::TurboAssembler;
1531
1532 // Instruction set functions ------------------------------------------------
1533 // Logical macros.
1534 inline void Bics(const Register& rd, const Register& rn,
1535 const Operand& operand);
1536
1537 inline void Adcs(const Register& rd, const Register& rn,
1538 const Operand& operand);
1539 inline void Sbc(const Register& rd, const Register& rn,
1540 const Operand& operand);
1541 inline void Sbcs(const Register& rd, const Register& rn,
1542 const Operand& operand);
1543 inline void Ngc(const Register& rd, const Operand& operand);
1544 inline void Ngcs(const Register& rd, const Operand& operand);
1545
1546 inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
1547 Condition cond);
1548
1549 #define DECLARE_FUNCTION(FN, OP) \
1550 inline void FN(const Register& rs, const Register& rt, const Register& rn);
1551 STLX_MACRO_LIST(DECLARE_FUNCTION)
1552 #undef DECLARE_FUNCTION
1553
1554 // Branch type inversion relies on these relations.
1555 STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
1556 (reg_bit_clear == (reg_bit_set ^ 1)) &&
1557 (always == (never ^ 1)));
1558
1559 inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
1560 unsigned width);
1561 inline void Cinc(const Register& rd, const Register& rn, Condition cond);
1562 inline void Cinv(const Register& rd, const Register& rn, Condition cond);
1563 inline void CzeroX(const Register& rd, Condition cond);
1564 inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
1565 Condition cond);
1566 inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
1567 Condition cond);
1568 inline void Extr(const Register& rd, const Register& rn, const Register& rm,
1569 unsigned lsb);
1570   void Fcvtl(const VRegister& vd, const VRegister& vn) {
1571 DCHECK(allow_macro_instructions());
1572 fcvtl(vd, vn);
1573 }
1574   void Fcvtl2(const VRegister& vd, const VRegister& vn) {
1575 DCHECK(allow_macro_instructions());
1576 fcvtl2(vd, vn);
1577 }
1578   void Fcvtn(const VRegister& vd, const VRegister& vn) {
1579 DCHECK(allow_macro_instructions());
1580 fcvtn(vd, vn);
1581 }
1582   void Fcvtn2(const VRegister& vd, const VRegister& vn) {
1583 DCHECK(allow_macro_instructions());
1584 fcvtn2(vd, vn);
1585 }
1586   void Fcvtxn(const VRegister& vd, const VRegister& vn) {
1587 DCHECK(allow_macro_instructions());
1588 fcvtxn(vd, vn);
1589 }
1590   void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
1591 DCHECK(allow_macro_instructions());
1592 fcvtxn2(vd, vn);
1593 }
1594 inline void Fmadd(const VRegister& fd, const VRegister& fn,
1595 const VRegister& fm, const VRegister& fa);
1596 inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
1597 const VRegister& fm);
1598 inline void Fminnm(const VRegister& fd, const VRegister& fn,
1599 const VRegister& fm);
1600 inline void Fmsub(const VRegister& fd, const VRegister& fn,
1601 const VRegister& fm, const VRegister& fa);
1602 inline void Fnmadd(const VRegister& fd, const VRegister& fn,
1603 const VRegister& fm, const VRegister& fa);
1604 inline void Fnmsub(const VRegister& fd, const VRegister& fn,
1605 const VRegister& fm, const VRegister& fa);
1606 inline void Hint(SystemHint code);
1607 inline void Hlt(int code);
1608 inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
1609 const MemOperand& src);
1610 inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
1611   inline void Nop() { nop(); }
1612 void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
1613 const int shift_amount = 0) {
1614 DCHECK(allow_macro_instructions());
1615 mvni(vd, imm8, shift, shift_amount);
1616 }
1617 inline void Rev(const Register& rd, const Register& rn);
1618 inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
1619 unsigned width);
1620 inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
1621 const Register& ra);
1622 inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
1623 const Register& ra);
1624 inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
1625 inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
1626 const MemOperand& dst);
1627 inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
1628 const Register& ra);
1629 inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
1630 const Register& ra);
1631
1632   void Ld1(const VRegister& vt, const MemOperand& src) {
1633 DCHECK(allow_macro_instructions());
1634 ld1(vt, src);
1635 }
1636   void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1637 DCHECK(allow_macro_instructions());
1638 ld1(vt, vt2, src);
1639 }
1640   void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1641 const MemOperand& src) {
1642 DCHECK(allow_macro_instructions());
1643 ld1(vt, vt2, vt3, src);
1644 }
1645   void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1646 const VRegister& vt4, const MemOperand& src) {
1647 DCHECK(allow_macro_instructions());
1648 ld1(vt, vt2, vt3, vt4, src);
1649 }
1650   void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
1651 DCHECK(allow_macro_instructions());
1652 ld1(vt, lane, src);
1653 }
1654   void Ld1r(const VRegister& vt, const MemOperand& src) {
1655 DCHECK(allow_macro_instructions());
1656 ld1r(vt, src);
1657 }
1658   void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1659 DCHECK(allow_macro_instructions());
1660 ld2(vt, vt2, src);
1661 }
1662   void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
1663 const MemOperand& src) {
1664 DCHECK(allow_macro_instructions());
1665 ld2(vt, vt2, lane, src);
1666 }
1667   void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1668 DCHECK(allow_macro_instructions());
1669 ld2r(vt, vt2, src);
1670 }
1671   void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1672 const MemOperand& src) {
1673 DCHECK(allow_macro_instructions());
1674 ld3(vt, vt2, vt3, src);
1675 }
1676   void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1677 int lane, const MemOperand& src) {
1678 DCHECK(allow_macro_instructions());
1679 ld3(vt, vt2, vt3, lane, src);
1680 }
1681   void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1682 const MemOperand& src) {
1683 DCHECK(allow_macro_instructions());
1684 ld3r(vt, vt2, vt3, src);
1685 }
1686   void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1687 const VRegister& vt4, const MemOperand& src) {
1688 DCHECK(allow_macro_instructions());
1689 ld4(vt, vt2, vt3, vt4, src);
1690 }
1691   void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1692 const VRegister& vt4, int lane, const MemOperand& src) {
1693 DCHECK(allow_macro_instructions());
1694 ld4(vt, vt2, vt3, vt4, lane, src);
1695 }
1696   void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1697 const VRegister& vt4, const MemOperand& src) {
1698 DCHECK(allow_macro_instructions());
1699 ld4r(vt, vt2, vt3, vt4, src);
1700 }
1701   void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
1702 DCHECK(allow_macro_instructions());
1703 st2(vt, vt2, dst);
1704 }
1705   void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1706 const MemOperand& dst) {
1707 DCHECK(allow_macro_instructions());
1708 st3(vt, vt2, vt3, dst);
1709 }
1710   void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1711 const VRegister& vt4, const MemOperand& dst) {
1712 DCHECK(allow_macro_instructions());
1713 st4(vt, vt2, vt3, vt4, dst);
1714 }
1715   void St2(const VRegister& vt, const VRegister& vt2, int lane,
1716 const MemOperand& dst) {
1717 DCHECK(allow_macro_instructions());
1718 st2(vt, vt2, lane, dst);
1719 }
1720   void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1721 int lane, const MemOperand& dst) {
1722 DCHECK(allow_macro_instructions());
1723 st3(vt, vt2, vt3, lane, dst);
1724 }
1725   void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1726 const VRegister& vt4, int lane, const MemOperand& dst) {
1727 DCHECK(allow_macro_instructions());
1728 st4(vt, vt2, vt3, vt4, lane, dst);
1729 }
1730   void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1731 DCHECK(allow_macro_instructions());
1732 tbx(vd, vn, vm);
1733 }
1734   void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1735 const VRegister& vm) {
1736 DCHECK(allow_macro_instructions());
1737 tbx(vd, vn, vn2, vm);
1738 }
1739   void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1740 const VRegister& vn3, const VRegister& vm) {
1741 DCHECK(allow_macro_instructions());
1742 tbx(vd, vn, vn2, vn3, vm);
1743 }
1744   void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1745 const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
1746 DCHECK(allow_macro_instructions());
1747 tbx(vd, vn, vn2, vn3, vn4, vm);
1748 }
1749
1750   inline void PushSizeRegList(RegList registers, unsigned reg_size) {
1751 PushCPURegList(CPURegList(reg_size, registers));
1752 }
1753   inline void PushSizeRegList(DoubleRegList registers, unsigned reg_size) {
1754 PushCPURegList(CPURegList(reg_size, registers));
1755 }
1756   inline void PopSizeRegList(RegList registers, unsigned reg_size) {
1757 PopCPURegList(CPURegList(reg_size, registers));
1758 }
1759   inline void PopSizeRegList(DoubleRegList registers, unsigned reg_size) {
1760 PopCPURegList(CPURegList(reg_size, registers));
1761 }
1762   inline void PushXRegList(RegList regs) {
1763 PushSizeRegList(regs, kXRegSizeInBits);
1764 }
1765   inline void PopXRegList(RegList regs) {
1766 PopSizeRegList(regs, kXRegSizeInBits);
1767 }
1768   inline void PushWRegList(RegList regs) {
1769 PushSizeRegList(regs, kWRegSizeInBits);
1770 }
1771   inline void PopWRegList(RegList regs) {
1772 PopSizeRegList(regs, kWRegSizeInBits);
1773 }
1774   inline void PushQRegList(DoubleRegList regs) {
1775 PushSizeRegList(regs, kQRegSizeInBits);
1776 }
1777   inline void PopQRegList(DoubleRegList regs) {
1778 PopSizeRegList(regs, kQRegSizeInBits);
1779 }
1780   inline void PushDRegList(DoubleRegList regs) {
1781 PushSizeRegList(regs, kDRegSizeInBits);
1782 }
1783   inline void PopDRegList(DoubleRegList regs) {
1784 PopSizeRegList(regs, kDRegSizeInBits);
1785 }
1786   inline void PushSRegList(DoubleRegList regs) {
1787 PushSizeRegList(regs, kSRegSizeInBits);
1788 }
1789   inline void PopSRegList(DoubleRegList regs) {
1790 PopSizeRegList(regs, kSRegSizeInBits);
1791 }
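  // For example, to spill and later restore a handful of 64-bit registers
  // (a sketch; the register selection is arbitrary):
  //
  //   PushXRegList({x0, x1, x2, x3});
  //   // ...
  //   PopXRegList({x0, x1, x2, x3});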
1792
1793 // Push the specified register 'count' times.
1794 void PushMultipleTimes(CPURegister src, Register count);
1795
1796 // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
1797 // values peeked will be adjacent, with the value in 'dst2' being from a
1798 // higher address than 'dst1'. The offset is in bytes. The stack pointer must
1799 // be aligned to 16 bytes.
1800 void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
1801
1802 // Preserve the callee-saved registers (as defined by AAPCS64).
1803 //
1804 // Higher-numbered registers are pushed before lower-numbered registers, and
1805 // thus get higher addresses.
1806 // Floating-point registers are pushed before general-purpose registers, and
1807 // thus get higher addresses.
1808 //
1809 // When control flow integrity measures are enabled, this method signs the
1810 // link register before pushing it.
1811 //
1812 // Note that registers are not checked for invalid values. Use this method
1813 // only if you know that the GC won't try to examine the values on the stack.
1814 void PushCalleeSavedRegisters();
1815
1816 // Restore the callee-saved registers (as defined by AAPCS64).
1817 //
1818 // Higher-numbered registers are popped after lower-numbered registers, and
1819 // thus come from higher addresses.
1820 // Floating-point registers are popped after general-purpose registers, and
1821 // thus come from higher addresses.
1822 //
1823 // When control flow integrity measures are enabled, this method
1824 // authenticates the link register after popping it.
1825 void PopCalleeSavedRegisters();
1826
1827 // Helpers ------------------------------------------------------------------
1828
1829 template <typename Field>
1830   void DecodeField(Register dst, Register src) {
1831 static const int shift = Field::kShift;
1832 static const int setbits = CountSetBits(Field::kMask, 32);
1833 Ubfx(dst, src, shift, setbits);
1834 }
1835
1836 template <typename Field>
1837   void DecodeField(Register reg) {
1838 DecodeField<Field>(reg, reg);
1839 }
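  // A minimal sketch, assuming a base::BitField-style field type; the
  // ExampleField alias below is hypothetical, purely for illustration:
  //
  //   using ExampleField = base::BitField<int, 3, 4>;  // bits [6:3]
  //   DecodeField<ExampleField>(x0, x1);               // x0 = (x1 >> 3) & 0xf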
1840
1841 Operand ReceiverOperand(const Register arg_count);
1842
1843 // ---- SMI and Number Utilities ----
1844
1845 inline void JumpIfNotSmi(Register value, Label* not_smi_label);
1846
1847 // Abort execution if argument is a smi, enabled via --debug-code.
1848 void AssertNotSmi(Register object,
1849 AbortReason reason = AbortReason::kOperandIsASmi);
1850
1851 // Abort execution if argument is not a CodeT, enabled via --debug-code.
1852 void AssertCodeT(Register object);
1853
1854 // Abort execution if argument is not a Constructor, enabled via --debug-code.
1855 void AssertConstructor(Register object);
1856
1857 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1858 void AssertFunction(Register object);
1859
1860 // Abort execution if argument is not a callable JSFunction, enabled via
1861 // --debug-code.
1862 void AssertCallableFunction(Register object);
1863
1864 // Abort execution if argument is not a JSGeneratorObject (or subclass),
1865 // enabled via --debug-code.
1866 void AssertGeneratorObject(Register object);
1867
1868 // Abort execution if argument is not a JSBoundFunction,
1869 // enabled via --debug-code.
1870 void AssertBoundFunction(Register object);
1871
1872 // Abort execution if argument is not undefined or an AllocationSite, enabled
1873 // via --debug-code.
1874 void AssertUndefinedOrAllocationSite(Register object);
1875
1876 // ---- Calling / Jumping helpers ----
1877
1878 void CallRuntime(const Runtime::Function* f, int num_arguments,
1879 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
1880
1881 // Convenience function: Same as above, but takes the fid instead.
1882 void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1883 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1884 CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1885 }
1886
1887 // Convenience function: Same as above, but takes the fid instead.
1888 void CallRuntime(Runtime::FunctionId fid,
1889 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1890 const Runtime::Function* function = Runtime::FunctionForId(fid);
1891 CallRuntime(function, function->nargs, save_doubles);
1892 }
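  // For instance (a sketch; kStackGuard is one runtime function that takes no
  // arguments, so its descriptor supplies the argument count):
  //
  //   CallRuntime(Runtime::kStackGuard);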
1893
1894 void TailCallRuntime(Runtime::FunctionId fid);
1895
1896 // Jump to a runtime routine.
1897 void JumpToExternalReference(const ExternalReference& builtin,
1898 bool builtin_exit_frame = false);
1899
1900 // Generates a trampoline to jump to the off-heap instruction stream.
1901 void JumpToOffHeapInstructionStream(Address entry);
1902
1903 // Registers used through the invocation chain are hard-coded.
1904 // We force passing the parameters to ensure the contracts are correctly
1905 // honoured by the caller.
1906 // 'function' must be x1.
1907 // 'actual' must use an immediate or x0.
1908 // 'expected' must use an immediate or x2.
1909 // 'call_kind' must be x5.
1910 void InvokePrologue(Register expected_parameter_count,
1911 Register actual_parameter_count, Label* done,
1912 InvokeType type);
1913
1914 // On function call, call into the debugger.
1915 void CallDebugOnFunctionCall(Register fun, Register new_target,
1916 Register expected_parameter_count,
1917 Register actual_parameter_count);
1918 void InvokeFunctionCode(Register function, Register new_target,
1919 Register expected_parameter_count,
1920 Register actual_parameter_count, InvokeType type);
1921 // Invoke the JavaScript function in the given register.
1922 // Changes the current context to the context in the function before invoking.
1923 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1924 Register actual_parameter_count,
1925 InvokeType type);
1926 void InvokeFunction(Register function, Register expected_parameter_count,
1927 Register actual_parameter_count, InvokeType type);
1928
1929 // ---- Code generation helpers ----
1930
1931 // ---------------------------------------------------------------------------
1932 // Support functions.
1933
1934 // Compare object type for heap object. heap_object contains a non-Smi
1935 // whose object type should be compared with the given type. This both
1936 // sets the flags and leaves the object type in the type_reg register.
1937 // It leaves the map in the map register (unless the type_reg and map register
1938 // are the same register). It leaves the heap object in the heap_object
1939 // register unless the heap_object register is the same register as one of the
1940 // other registers.
1941 void CompareObjectType(Register heap_object, Register map, Register type_reg,
1942 InstanceType type);
1943
1944   // Compare object type for heap object, and branch if equal (or not).
1945 // heap_object contains a non-Smi whose object type should be compared with
1946 // the given type. This both sets the flags and leaves the object type in
1947 // the type_reg register. It leaves the map in the map register (unless the
1948 // type_reg and map register are the same register). It leaves the heap
1949 // object in the heap_object register unless the heap_object register is the
1950 // same register as one of the other registers.
1951 void JumpIfObjectType(Register object, Register map, Register type_reg,
1952 InstanceType type, Label* if_cond_pass,
1953 Condition cond = eq);
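  // For example, to branch when an object is a JSFunction (a sketch; the
  // register assignments are arbitrary and the map/type registers are
  // clobbered):
  //
  //   Label is_function;
  //   JumpIfObjectType(x0, x2, x3, JS_FUNCTION_TYPE, &is_function);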
1954
1955 // Compare instance type in a map. map contains a valid map object whose
1956 // object type should be compared with the given type. This both
1957 // sets the flags and leaves the object type in the type_reg register.
1958 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
1959
1960 // Compare instance type ranges for a map (lower_limit and higher_limit
1961 // inclusive).
1962 //
1963 // Always use unsigned comparisons: ls for a positive result.
1964 void CompareInstanceTypeRange(Register map, Register type_reg,
1965 InstanceType lower_limit,
1966 InstanceType higher_limit);
1967
1968 // Load the elements kind field from a map, and return it in the result
1969 // register.
1970 void LoadElementsKindFromMap(Register result, Register map);
1971
1972 // Compare the object in a register to a value from the root list.
1973 void CompareRoot(const Register& obj, RootIndex index);
1974
1975 // Compare the object in a register to a value and jump if they are equal.
1976 void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
1977
1978 // Compare the object in a register to a value and jump if they are not equal.
1979 void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
1980
1981 // Checks if value is in range [lower_limit, higher_limit] using a single
1982 // comparison.
1983 void JumpIfIsInRange(const Register& value, unsigned lower_limit,
1984 unsigned higher_limit, Label* on_in_range);
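  // The single comparison works because the check is rewritten as an unsigned
  // comparison against the range width:
  //   lower_limit <= value <= higher_limit
  //     <=>  (value - lower_limit) <= (higher_limit - lower_limit)  (unsigned)
  // For example (a sketch), JumpIfIsInRange(x0, 'a', 'z', &is_lower_ascii)
  // branches when x0 holds a lowercase ASCII character code.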
1985
1986 // ---------------------------------------------------------------------------
1987 // Frames.
1988
1989 void ExitFramePreserveFPRegs();
1990 void ExitFrameRestoreFPRegs();
1991
1992 // Enter exit frame. Exit frames are used when calling C code from generated
1993 // (JavaScript) code.
1994 //
1995 // The only registers modified by this function are the provided scratch
1996 // register, the frame pointer and the stack pointer.
1997 //
1998 // The 'extra_space' argument can be used to allocate some space in the exit
1999 // frame that will be ignored by the GC. This space will be reserved in the
2000 // bottom of the frame immediately above the return address slot.
2001 //
2002 // Set up a stack frame and registers as follows:
2003 // fp[8]: CallerPC (lr)
2004 // fp -> fp[0]: CallerFP (old fp)
2005 // fp[-8]: SPOffset (new sp)
2006 // fp[-16]: CodeObject()
2007 // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
2008 // sp[8]: Memory reserved for the caller if extra_space != 0.
2009 // Alignment padding, if necessary.
2010 // sp -> sp[0]: Space reserved for the return address.
2011 //
2012 // This function also stores the new frame information in the top frame, so
2013 // that the new frame becomes the current frame.
2014 void EnterExitFrame(bool save_doubles, const Register& scratch,
2015 int extra_space = 0,
2016 StackFrame::Type frame_type = StackFrame::EXIT);
2017
2018 // Leave the current exit frame, after a C function has returned to generated
2019 // (JavaScript) code.
2020 //
2021 // This effectively unwinds the operation of EnterExitFrame:
2022 // * Preserved doubles are restored (if restore_doubles is true).
2023 // * The frame information is removed from the top frame.
2024 // * The exit frame is dropped.
2025 void LeaveExitFrame(bool save_doubles, const Register& scratch,
2026 const Register& scratch2);
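  // The typical pairing when calling into C from generated code looks roughly
  // like this (a sketch; the scratch registers, argument count and external
  // reference are illustrative, and CallCFunction is declared on
  // TurboAssembler):
  //
  //   EnterExitFrame(false, x10);
  //   // ... place the C arguments in x0..x7 / d0..d7 ...
  //   CallCFunction(ExternalReference::Create(...), 2);
  //   LeaveExitFrame(false, x10, x11);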
2027
2028 // Load the global proxy from the current context.
2029 void LoadGlobalProxy(Register dst);
2030
2031 // ---------------------------------------------------------------------------
2032 // In-place weak references.
2033 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
2034
2035 // ---------------------------------------------------------------------------
2036 // StatsCounter support
2037
2038   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
2039 Register scratch2) {
2040 if (!FLAG_native_code_counters) return;
2041 EmitIncrementCounter(counter, value, scratch1, scratch2);
2042 }
2043 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
2044 Register scratch2);
2045   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
2046 Register scratch2) {
2047 if (!FLAG_native_code_counters) return;
2048 EmitIncrementCounter(counter, -value, scratch1, scratch2);
2049 }
2050
2051 // ---------------------------------------------------------------------------
2052 // Stack limit utilities
2053 void LoadStackLimit(Register destination, StackLimitKind kind);
2054 void StackOverflowCheck(Register num_args, Label* stack_overflow);
2055
2056 // ---------------------------------------------------------------------------
2057 // Garbage collector support (GC).
2058
2059 // Notify the garbage collector that we wrote a pointer into an object.
2060 // |object| is the object being stored into, |value| is the object being
2061 // stored.
2062 // The offset is the offset from the start of the object, not the offset from
2063 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
2064 void RecordWriteField(
2065 Register object, int offset, Register value, LinkRegisterStatus lr_status,
2066 SaveFPRegsMode save_fp,
2067 RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
2068 SmiCheck smi_check = SmiCheck::kInline);
2069
2070 // For a given |object| notify the garbage collector that the slot at |offset|
2071 // has been written. |value| is the object being stored.
2072 void RecordWrite(
2073 Register object, Operand offset, Register value,
2074 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
2075 RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
2076 SmiCheck smi_check = SmiCheck::kInline);
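  // A typical store-with-write-barrier sequence is therefore (a sketch; the
  // registers and offset are illustrative):
  //
  //   StoreTaggedField(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, kLRHasNotBeenSaved,
  //                    SaveFPRegsMode::kIgnore);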
2077
2078 // ---------------------------------------------------------------------------
2079 // Debugging.
2080
2081 void LoadNativeContextSlot(Register dst, int index);
2082
2083 DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
2084 };
2085
2086 // Use this scope when you need a one-to-one mapping between methods and
2087 // instructions. This scope prevents the MacroAssembler from being called and
2088 // literal pools from being emitted. It also asserts the number of instructions
2089 // emitted is what you specified when creating the scope.
2090 class V8_NODISCARD InstructionAccurateScope {
2091 public:
2092 explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
2093       : tasm_(tasm),
2094 block_pool_(tasm, count * kInstrSize)
2095 #ifdef DEBUG
2096 ,
2097 size_(count * kInstrSize)
2098 #endif
2099 {
2100 tasm_->CheckVeneerPool(false, true, count * kInstrSize);
2101 tasm_->StartBlockVeneerPool();
2102 #ifdef DEBUG
2103 if (count != 0) {
2104 tasm_->bind(&start_);
2105 }
2106 previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
2107 tasm_->set_allow_macro_instructions(false);
2108 #endif
2109 }
2110
2111   ~InstructionAccurateScope() {
2112 tasm_->EndBlockVeneerPool();
2113 #ifdef DEBUG
2114 if (start_.is_bound()) {
2115 DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
2116 }
2117 tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2118 #endif
2119 }
2120
2121 private:
2122 TurboAssembler* tasm_;
2123 TurboAssembler::BlockConstPoolScope block_pool_;
2124 #ifdef DEBUG
2125 size_t size_;
2126 Label start_;
2127 bool previous_allow_macro_instructions_;
2128 #endif
2129 };
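
// A typical use, pinning an exact two-instruction sequence (a sketch; the
// instructions themselves are arbitrary):
//
//   {
//     InstructionAccurateScope scope(tasm, 2);
//     tasm->ldr(x16, MemOperand(x17));
//     tasm->br(x16);
//   }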
2130
2131 // This scope utility allows scratch registers to be managed safely. The
2132 // TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
2133 // registers. These registers can be allocated on demand, and will be returned
2134 // at the end of the scope.
2135 //
2136 // When the scope ends, the MacroAssembler's lists will be restored to their
2137 // original state, even if the lists were modified by some other means. Note
2138 // that this scope can be nested, but the destructors need to run in the reverse
2139 // order of the constructors. We do not have assertions for this.
2140 class V8_NODISCARD UseScratchRegisterScope {
2141 public:
2142   explicit UseScratchRegisterScope(TurboAssembler* tasm)
2143 : available_(tasm->TmpList()),
2144 availablefp_(tasm->FPTmpList()),
2145 old_available_(available_->bits()),
2146 old_availablefp_(availablefp_->bits()) {
2147 DCHECK_EQ(available_->type(), CPURegister::kRegister);
2148 DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
2149 }
2150
2151 V8_EXPORT_PRIVATE ~UseScratchRegisterScope();
2152
2153 // Take a register from the appropriate temps list. It will be returned
2154 // automatically when the scope ends.
2155   Register AcquireW() { return AcquireNextAvailable(available_).W(); }
2156   Register AcquireX() { return AcquireNextAvailable(available_).X(); }
2157   VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
2158   VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
2159   VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
2160   VRegister AcquireV(VectorFormat format) {
2161 return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
2162 }
2163
2164 Register AcquireSameSizeAs(const Register& reg);
2165 V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);
2166
2167   void Include(const CPURegList& list) { available_->Combine(list); }
2168   void Exclude(const CPURegList& list) {
2169 #if DEBUG
2170 CPURegList copy(list);
2171 while (!copy.IsEmpty()) {
2172 const CPURegister& reg = copy.PopHighestIndex();
2173 DCHECK(available_->IncludesAliasOf(reg));
2174 }
2175 #endif
2176 available_->Remove(list);
2177 }
2178 void Include(const Register& reg1, const Register& reg2 = NoReg) {
2179 CPURegList list(reg1, reg2);
2180 Include(list);
2181 }
2182 void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
2183 CPURegList list(reg1, reg2);
2184 Exclude(list);
2185 }
2186
2187 private:
2188 V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
2189 CPURegList* available);
2190
2191 // Available scratch registers.
2192 CPURegList* available_; // kRegister
2193 CPURegList* availablefp_; // kVRegister
2194
2195 // The state of the available lists at the start of this scope.
2196 uint64_t old_available_; // kRegister
2197 uint64_t old_availablefp_; // kVRegister
2198 };
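
// A typical use (a sketch): acquire a temporary for the duration of a helper
// and let the destructor return it to the pool.
//
//   {
//     UseScratchRegisterScope temps(tasm);
//     Register scratch = temps.AcquireX();
//     tasm->Mov(scratch, 0xdeadbeef);
//     // ... use scratch ...
//   }  // scratch becomes available to other helpers again here.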
2199
2200 } // namespace internal
2201 } // namespace v8
2202
2203 #define ACCESS_MASM(masm) masm->
2204
2205 #endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
2206