1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_ARM64
6 
7 #include "src/assembler.h"
8 #include "src/base/bits.h"
9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h"
11 #include "src/callable.h"
12 #include "src/code-factory.h"
13 #include "src/code-stubs.h"
14 #include "src/debug/debug.h"
15 #include "src/external-reference-table.h"
16 #include "src/frame-constants.h"
17 #include "src/frames-inl.h"
18 #include "src/heap/heap-inl.h"
19 #include "src/instruction-stream.h"
20 #include "src/register-configuration.h"
21 #include "src/runtime/runtime.h"
22 #include "src/snapshot/snapshot.h"
23 #include "src/wasm/wasm-code-manager.h"
24 
25 #include "src/arm64/macro-assembler-arm64-inl.h"
26 #include "src/arm64/macro-assembler-arm64.h"  // Cannot be the first include
27 
28 namespace v8 {
29 namespace internal {
30 
31 MacroAssembler::MacroAssembler(Isolate* isolate,
32                                const AssemblerOptions& options, void* buffer,
33                                int size, CodeObjectRequired create_code_object)
34     : TurboAssembler(isolate, options, buffer, size, create_code_object) {
35   if (create_code_object == CodeObjectRequired::kYes) {
36     // Unlike TurboAssembler, which can be used off the main thread and may not
37     // allocate, macro assembler creates its own copy of the self-reference
38     // marker in order to disambiguate between self-references during nested
39     // code generation (e.g.: codegen of the current object triggers stub
40     // compilation through CodeStub::GetCode()).
41     code_object_ = Handle<HeapObject>::New(
42         *isolate->factory()->NewSelfReferenceMarker(), isolate);
43   }
44 }
45 
46 CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
47 
48 CPURegList TurboAssembler::DefaultFPTmpList() {
49   return CPURegList(fp_scratch1, fp_scratch2);
50 }
51 
52 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
53                                                     Register exclusion) const {
54   int bytes = 0;
55   auto list = kCallerSaved;
56   DCHECK_EQ(list.Count() % 2, 0);
57   // We only allow one exclusion register, so if the list is of even length
58   // before exclusions, it must still be afterwards, to maintain alignment.
59   // Therefore, we can ignore the exclusion register in the computation.
60   // However, we leave it in the argument list to mirror the prototype for
61   // Push/PopCallerSaved().
62   USE(exclusion);
63   bytes += list.Count() * kXRegSizeInBits / 8;
64 
65   if (fp_mode == kSaveFPRegs) {
66     DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
67     bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
68   }
69   return bytes;
70 }
71 
72 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
73                                     Register exclusion) {
74   int bytes = 0;
75   auto list = kCallerSaved;
76   DCHECK_EQ(list.Count() % 2, 0);
77   if (!exclusion.Is(no_reg)) {
78     // Replace the excluded register with padding to maintain alignment.
79     list.Remove(exclusion);
80     list.Combine(padreg);
81   }
82   PushCPURegList(list);
83   bytes += list.Count() * kXRegSizeInBits / 8;
84 
85   if (fp_mode == kSaveFPRegs) {
86     DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
87     PushCPURegList(kCallerSavedV);
88     bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
89   }
90   return bytes;
91 }
92 
93 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
94   int bytes = 0;
95   if (fp_mode == kSaveFPRegs) {
96     DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
97     PopCPURegList(kCallerSavedV);
98     bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
99   }
100 
101   auto list = kCallerSaved;
102   DCHECK_EQ(list.Count() % 2, 0);
103   if (!exclusion.Is(no_reg)) {
104     // Replace the excluded register with padding to maintain alignment.
105     list.Remove(exclusion);
106     list.Combine(padreg);
107   }
108   PopCPURegList(list);
109   bytes += list.Count() * kXRegSizeInBits / 8;
110 
111   return bytes;
112 }
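// Editorial note (illustrative usage, not part of the original file): a caller
// typically saves everything except the register that will receive the call's
// result, e.g. (x0 shown purely as an example of a caller-saved register):
//   int bytes = PushCallerSaved(kSaveFPRegs, x0);
//   // ... perform the call that clobbers caller-saved registers ...
//   PopCallerSaved(kSaveFPRegs, x0);
// The excluded register's slot is filled with padreg, so the pushed frame stays
// 16-byte aligned and matches RequiredStackSizeForCallerSaved().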
113 
114 void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
115                                   const Operand& operand, LogicalOp op) {
116   UseScratchRegisterScope temps(this);
117 
118   if (operand.NeedsRelocation(this)) {
119     Register temp = temps.AcquireX();
120     Ldr(temp, operand.immediate());
121     Logical(rd, rn, temp, op);
122 
123   } else if (operand.IsImmediate()) {
124     int64_t immediate = operand.ImmediateValue();
125     unsigned reg_size = rd.SizeInBits();
126 
127     // If the operation is NOT, invert the operation and immediate.
128     if ((op & NOT) == NOT) {
129       op = static_cast<LogicalOp>(op & ~NOT);
130       immediate = ~immediate;
131     }
132 
133     // Ignore the top 32 bits of an immediate if we're moving to a W register.
134     if (rd.Is32Bits()) {
135       // Check that the top 32 bits are consistent.
136       DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
137              ((immediate >> kWRegSizeInBits) == -1));
138       immediate &= kWRegMask;
139     }
140 
141     DCHECK(rd.Is64Bits() || is_uint32(immediate));
142 
143     // Special cases for all set or all clear immediates.
144     if (immediate == 0) {
145       switch (op) {
146         case AND:
147           Mov(rd, 0);
148           return;
149         case ORR:  // Fall through.
150         case EOR:
151           Mov(rd, rn);
152           return;
153         case ANDS:  // Fall through.
154         case BICS:
155           break;
156         default:
157           UNREACHABLE();
158       }
159     } else if ((rd.Is64Bits() && (immediate == -1L)) ||
160                (rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
161       switch (op) {
162         case AND:
163           Mov(rd, rn);
164           return;
165         case ORR:
166           Mov(rd, immediate);
167           return;
168         case EOR:
169           Mvn(rd, rn);
170           return;
171         case ANDS:  // Fall through.
172         case BICS:
173           break;
174         default:
175           UNREACHABLE();
176       }
177     }
178 
179     unsigned n, imm_s, imm_r;
180     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
181       // Immediate can be encoded in the instruction.
182       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
183     } else {
184       // Immediate can't be encoded: synthesize using move immediate.
185       Register temp = temps.AcquireSameSizeAs(rn);
186 
187       // If the left-hand input is the stack pointer, we can't pre-shift the
188       // immediate, as the encoding won't allow the subsequent post shift.
189       PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
190       Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
191 
192       if (rd.IsSP()) {
193         // If rd is the stack pointer we cannot use it as the destination
194         // register so we use the temp register as an intermediate again.
195         Logical(temp, rn, imm_operand, op);
196         Mov(sp, temp);
197       } else {
198         Logical(rd, rn, imm_operand, op);
199       }
200     }
201 
202   } else if (operand.IsExtendedRegister()) {
203     DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
204     // Add/sub extended supports shift <= 4. We want to support exactly the
205     // same modes here.
206     DCHECK_LE(operand.shift_amount(), 4);
207     DCHECK(operand.reg().Is64Bits() ||
208            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
209     Register temp = temps.AcquireSameSizeAs(rn);
210     EmitExtendShift(temp, operand.reg(), operand.extend(),
211                     operand.shift_amount());
212     Logical(rd, rn, temp, op);
213 
214   } else {
215     // The operand can be encoded in the instruction.
216     DCHECK(operand.IsShiftedRegister());
217     Logical(rd, rn, operand, op);
218   }
219 }
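// Editorial note (illustrative expansions): Orr(x0, x1, 0xFFFF) encodes 0xFFFF
// as a bitmask immediate and emits a single orr; And(x0, x1, 0) folds to
// 'mov x0, #0'; Eor(x0, x1, -1) folds to 'mvn x0, x1'. An immediate such as
// 0x12348000, which is not a valid bitmask, is materialized in a scratch
// register (see MoveImmediateForShiftedOp below) before the logical operation.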
220 
221 void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
222   DCHECK(allow_macro_instructions());
223   DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
224   DCHECK(!rd.IsZero());
225 
226   // TODO(all) extend to support more immediates.
227   //
228   // Immediates on AArch64 can be produced using an initial value, and zero to
229   // three move-keep operations.
230   //
231   // Initial values can be generated with:
232   //  1. 64-bit move zero (movz).
233   //  2. 32-bit move inverted (movn).
234   //  3. 64-bit move inverted.
235   //  4. 32-bit orr immediate.
236   //  5. 64-bit orr immediate.
237   // Move-keep may then be used to modify each of the 16-bit half-words.
238   //
239   // The code below supports all five initial value generators, and
240   // applying move-keep operations to move-zero and move-inverted initial
241   // values.
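  // Illustrative expansions (editorial note): Mov(x0, 0x123400005678) uses a
  // move-zero initial value plus one move-keep, e.g. 'movz x0, #0x5678' then
  // 'movk x0, #0x1234, lsl #32'. Mov(x0, 0xFFFF0000FFFFCAFE) is cheaper in the
  // inverted form: 'movn x0, #0x3501' (producing 0xFFFFFFFFFFFFCAFE) then
  // 'movk x0, #0x0, lsl #32'.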
242 
243   // Try to move the immediate in one instruction, and if that fails, switch to
244   // using multiple instructions.
245   if (!TryOneInstrMoveImmediate(rd, imm)) {
246     unsigned reg_size = rd.SizeInBits();
247 
248     // Generic immediate case. Imm will be represented by
249     //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
250     // A move-zero or move-inverted is generated for the first non-zero or
251     // non-0xFFFF immX, and a move-keep for subsequent non-zero immX.
252 
253     uint64_t ignored_halfword = 0;
254     bool invert_move = false;
255     // If the number of 0xFFFF halfwords is greater than the number of 0x0000
256     // halfwords, it's more efficient to use move-inverted.
257     if (CountClearHalfWords(~imm, reg_size) >
258         CountClearHalfWords(imm, reg_size)) {
259       ignored_halfword = 0xFFFFL;
260       invert_move = true;
261     }
262 
263     // Mov instructions can't move immediate values into the stack pointer, so
264     // set up a temporary register, if needed.
265     UseScratchRegisterScope temps(this);
266     Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
267 
268     // Iterate through the halfwords. Use movn/movz for the first non-ignored
269     // halfword, and movk for subsequent halfwords.
270     DCHECK_EQ(reg_size % 16, 0);
271     bool first_mov_done = false;
272     for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
273       uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
274       if (imm16 != ignored_halfword) {
275         if (!first_mov_done) {
276           if (invert_move) {
277             movn(temp, (~imm16) & 0xFFFFL, 16 * i);
278           } else {
279             movz(temp, imm16, 16 * i);
280           }
281           first_mov_done = true;
282         } else {
283           // Construct a wider constant.
284           movk(temp, imm16, 16 * i);
285         }
286       }
287     }
288     DCHECK(first_mov_done);
289 
290     // Move the temporary if the original destination register was the stack
291     // pointer.
292     if (rd.IsSP()) {
293       mov(rd, temp);
294     }
295   }
296 }
297 
298 void TurboAssembler::Mov(const Register& rd, const Operand& operand,
299                          DiscardMoveMode discard_mode) {
300   DCHECK(allow_macro_instructions());
301   DCHECK(!rd.IsZero());
302 
303   // Provide a swap register for instructions that need to write into the
304   // system stack pointer (and can't do this inherently).
305   UseScratchRegisterScope temps(this);
306   Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
307 
308   if (operand.NeedsRelocation(this)) {
309     if (FLAG_embedded_builtins) {
310       if (root_array_available_ && options().isolate_independent_code) {
311         if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
312           Address addr = static_cast<Address>(operand.ImmediateValue());
313           ExternalReference reference = bit_cast<ExternalReference>(addr);
314           IndirectLoadExternalReference(rd, reference);
315           return;
316         } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
317           Handle<HeapObject> x(
318               reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
319           IndirectLoadConstant(rd, x);
320           return;
321         }
322       }
323     }
324     Ldr(dst, operand);
325   } else if (operand.IsImmediate()) {
326     // Call the macro assembler for generic immediates.
327     Mov(dst, operand.ImmediateValue());
328   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
329     // Emit a shift instruction if moving a shifted register. This operation
330     // could also be achieved using an orr instruction (like orn used by Mvn),
331     // but using a shift instruction makes the disassembly clearer.
332     EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
333   } else if (operand.IsExtendedRegister()) {
334     // Emit an extend instruction if moving an extended register. This handles
335     // extend with post-shift operations, too.
336     EmitExtendShift(dst, operand.reg(), operand.extend(),
337                     operand.shift_amount());
338   } else {
339     // Otherwise, emit a register move only if the registers are distinct, or
340     // if they are not X registers.
341     //
342     // Note that mov(w0, w0) is not a no-op because it clears the top word of
343     // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
344     // registers is not required to clear the top word of the X register. In
345     // this case, the instruction is discarded.
346     //
347     // If sp is an operand, add #0 is emitted, otherwise, orr #0.
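    // Illustrative (editorial note): Mov(w0, Operand(w0)) still emits
    // 'mov w0, w0' to clear the upper 32 bits of x0, whereas
    // Mov(w0, Operand(w0), kDiscardForSameWReg) emits nothing.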
348     if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
349                                   (discard_mode == kDontDiscardForSameWReg))) {
350       Assembler::mov(rd, operand.reg());
351     }
352     // This case can handle writes into the system stack pointer directly.
353     dst = rd;
354   }
355 
356   // Copy the result to the system stack pointer.
357   if (!dst.Is(rd)) {
358     DCHECK(rd.IsSP());
359     Assembler::mov(rd, dst);
360   }
361 }
362 
363 void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
364   DCHECK(is_uint16(imm));
365   int byte1 = (imm & 0xFF);
366   int byte2 = ((imm >> 8) & 0xFF);
367   if (byte1 == byte2) {
368     movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
369   } else if (byte1 == 0) {
370     movi(vd, byte2, LSL, 8);
371   } else if (byte2 == 0) {
372     movi(vd, byte1);
373   } else if (byte1 == 0xFF) {
374     mvni(vd, ~byte2 & 0xFF, LSL, 8);
375   } else if (byte2 == 0xFF) {
376     mvni(vd, ~byte1 & 0xFF);
377   } else {
378     UseScratchRegisterScope temps(this);
379     Register temp = temps.AcquireW();
380     movz(temp, imm);
381     dup(vd, temp);
382   }
383 }
384 
385 void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
386   DCHECK(is_uint32(imm));
387 
388   uint8_t bytes[sizeof(imm)];
389   memcpy(bytes, &imm, sizeof(imm));
390 
391   // All bytes are either 0x00 or 0xFF.
392   {
393     bool all0orff = true;
394     for (int i = 0; i < 4; ++i) {
395       if ((bytes[i] != 0) && (bytes[i] != 0xFF)) {
396         all0orff = false;
397         break;
398       }
399     }
400 
401     if (all0orff == true) {
402       movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
403       return;
404     }
405   }
406 
407   // Of the 4 bytes, only one byte is non-zero.
408   for (int i = 0; i < 4; i++) {
409     if ((imm & (0xFF << (i * 8))) == imm) {
410       movi(vd, bytes[i], LSL, i * 8);
411       return;
412     }
413   }
414 
415   // Of the 4 bytes, only one byte is not 0xFF.
416   for (int i = 0; i < 4; i++) {
417     uint32_t mask = ~(0xFF << (i * 8));
418     if ((imm & mask) == mask) {
419       mvni(vd, ~bytes[i] & 0xFF, LSL, i * 8);
420       return;
421     }
422   }
423 
424   // Immediate is of the form 0x00MMFFFF.
425   if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
426     movi(vd, bytes[2], MSL, 16);
427     return;
428   }
429 
430   // Immediate is of the form 0x0000MMFF.
431   if ((imm & 0xFFFF00FF) == 0x000000FF) {
432     movi(vd, bytes[1], MSL, 8);
433     return;
434   }
435 
436   // Immediate is of the form 0xFFMM0000.
437   if ((imm & 0xFF00FFFF) == 0xFF000000) {
438     mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
439     return;
440   }
441   // Immediate is of the form 0xFFFFMM00.
442   if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
443     mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
444     return;
445   }
446 
447   // Top and bottom 16-bits are equal.
448   if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
449     Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
450     return;
451   }
452 
453   // Default case.
454   {
455     UseScratchRegisterScope temps(this);
456     Register temp = temps.AcquireW();
457     Mov(temp, imm);
458     dup(vd, temp);
459   }
460 }
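// Editorial note (illustrative): Movi(v0.V4S(), 0x3700) matches the
// "only one byte is non-zero" case above and emits 'movi v0.4s, #0x37, lsl #8',
// whereas a value such as 0x12345678 falls through to the default case
// (materialize the constant in a W scratch register, then dup it into each lane).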
461 
462 void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
463   // All bytes are either 0x00 or 0xFF.
464   {
465     bool all0orff = true;
466     for (int i = 0; i < 8; ++i) {
467       int byteval = (imm >> (i * 8)) & 0xFF;
468       if (byteval != 0 && byteval != 0xFF) {
469         all0orff = false;
470         break;
471       }
472     }
473     if (all0orff == true) {
474       movi(vd, imm);
475       return;
476     }
477   }
478 
479   // Top and bottom 32-bits are equal.
480   if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
481     Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
482     return;
483   }
484 
485   // Default case.
486   {
487     UseScratchRegisterScope temps(this);
488     Register temp = temps.AcquireX();
489     Mov(temp, imm);
490     if (vd.Is1D()) {
491       mov(vd.D(), 0, temp);
492     } else {
493       dup(vd.V2D(), temp);
494     }
495   }
496 }
497 
498 void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
499                           int shift_amount) {
500   DCHECK(allow_macro_instructions());
501   if (shift_amount != 0 || shift != LSL) {
502     movi(vd, imm, shift, shift_amount);
503   } else if (vd.Is8B() || vd.Is16B()) {
504     // 8-bit immediate.
505     DCHECK(is_uint8(imm));
506     movi(vd, imm);
507   } else if (vd.Is4H() || vd.Is8H()) {
508     // 16-bit immediate.
509     Movi16bitHelper(vd, imm);
510   } else if (vd.Is2S() || vd.Is4S()) {
511     // 32-bit immediate.
512     Movi32bitHelper(vd, imm);
513   } else {
514     // 64-bit immediate.
515     Movi64bitHelper(vd, imm);
516   }
517 }
518 
519 void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
520   // TODO(all): Move 128-bit values in a more efficient way.
521   DCHECK(vd.Is128Bits());
522   UseScratchRegisterScope temps(this);
523   Movi(vd.V2D(), lo);
524   Register temp = temps.AcquireX();
525   Mov(temp, hi);
526   Ins(vd.V2D(), 1, temp);
527 }
528 
529 void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
530   DCHECK(allow_macro_instructions());
531 
532   if (operand.NeedsRelocation(this)) {
533     Ldr(rd, operand.immediate());
534     mvn(rd, rd);
535 
536   } else if (operand.IsImmediate()) {
537     // Call the macro assembler for generic immediates.
538     Mov(rd, ~operand.ImmediateValue());
539 
540   } else if (operand.IsExtendedRegister()) {
541     // Emit two instructions for the extend case. This differs from Mov, as
542     // the extend and invert can't be achieved in one instruction.
543     EmitExtendShift(rd, operand.reg(), operand.extend(),
544                     operand.shift_amount());
545     mvn(rd, rd);
546 
547   } else {
548     mvn(rd, operand);
549   }
550 }
551 
552 unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
553   DCHECK_EQ(reg_size % 8, 0);
554   int count = 0;
555   for (unsigned i = 0; i < (reg_size / 16); i++) {
556     if ((imm & 0xFFFF) == 0) {
557       count++;
558     }
559     imm >>= 16;
560   }
561   return count;
562 }
563 
564 
565 // The movz instruction can generate immediates containing an arbitrary 16-bit
566 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
567 bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
568   DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
569   return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
570 }
571 
572 // The movn instruction can generate immediates containing an arbitrary 16-bit
573 // half-word, with remaining bits set, e.g. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
574 bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
575   return IsImmMovz(~imm, reg_size);
576 }
577 
578 void TurboAssembler::ConditionalCompareMacro(const Register& rn,
579                                              const Operand& operand,
580                                              StatusFlags nzcv, Condition cond,
581                                              ConditionalCompareOp op) {
582   DCHECK((cond != al) && (cond != nv));
583   if (operand.NeedsRelocation(this)) {
584     UseScratchRegisterScope temps(this);
585     Register temp = temps.AcquireX();
586     Ldr(temp, operand.immediate());
587     ConditionalCompareMacro(rn, temp, nzcv, cond, op);
588 
589   } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
590              (operand.IsImmediate() &&
591               IsImmConditionalCompare(operand.ImmediateValue()))) {
592     // The immediate can be encoded in the instruction, or the operand is an
593     // unshifted register: call the assembler.
594     ConditionalCompare(rn, operand, nzcv, cond, op);
595 
596   } else {
597     // The operand isn't directly supported by the instruction: perform the
598     // operation on a temporary register.
599     UseScratchRegisterScope temps(this);
600     Register temp = temps.AcquireSameSizeAs(rn);
601     Mov(temp, operand);
602     ConditionalCompare(rn, temp, nzcv, cond, op);
603   }
604 }
605 
606 void TurboAssembler::Csel(const Register& rd, const Register& rn,
607                           const Operand& operand, Condition cond) {
608   DCHECK(allow_macro_instructions());
609   DCHECK(!rd.IsZero());
610   DCHECK((cond != al) && (cond != nv));
611   if (operand.IsImmediate()) {
612     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
613     // register.
614     int64_t imm = operand.ImmediateValue();
615     Register zr = AppropriateZeroRegFor(rn);
616     if (imm == 0) {
617       csel(rd, rn, zr, cond);
618     } else if (imm == 1) {
619       csinc(rd, rn, zr, cond);
620     } else if (imm == -1) {
621       csinv(rd, rn, zr, cond);
622     } else {
623       UseScratchRegisterScope temps(this);
624       Register temp = temps.AcquireSameSizeAs(rn);
625       Mov(temp, imm);
626       csel(rd, rn, temp, cond);
627     }
628   } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
629     // Unshifted register argument.
630     csel(rd, rn, operand.reg(), cond);
631   } else {
632     // All other arguments.
633     UseScratchRegisterScope temps(this);
634     Register temp = temps.AcquireSameSizeAs(rn);
635     Mov(temp, operand);
636     csel(rd, rn, temp, cond);
637   }
638 }
639 
640 bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
641                                               int64_t imm) {
642   unsigned n, imm_s, imm_r;
643   int reg_size = dst.SizeInBits();
644   if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
645     // Immediate can be represented in a move zero instruction. Movz can't write
646     // to the stack pointer.
647     movz(dst, imm);
648     return true;
649   } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
650     // Immediate can be represented in a move not instruction. Movn can't write
651     // to the stack pointer.
652     movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
653     return true;
654   } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
655     // Immediate can be represented in a logical orr instruction.
656     LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
657     return true;
658   }
659   return false;
660 }
661 
662 Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
663                                                   int64_t imm,
664                                                   PreShiftImmMode mode) {
665   int reg_size = dst.SizeInBits();
666   // Encode the immediate in a single move instruction, if possible.
667   if (TryOneInstrMoveImmediate(dst, imm)) {
668     // The move was successful; nothing to do here.
669   } else {
670     // Pre-shift the immediate to the least-significant bits of the register.
671     int shift_low = CountTrailingZeros(imm, reg_size);
672     if (mode == kLimitShiftForSP) {
673       // When applied to the stack pointer, the subsequent arithmetic operation
674       // can use the extend form to shift left by a maximum of four bits. Right
675       // shifts are not allowed, so we filter them out later before the new
676       // immediate is tested.
677       shift_low = std::min(shift_low, 4);
678     }
679     int64_t imm_low = imm >> shift_low;
680 
681     // Pre-shift the immediate to the most-significant bits of the register. We
682     // insert set bits in the least-significant bits, as this creates a
683     // different immediate that may be encodable using movn or orr-immediate.
684     // If this new immediate is encodable, the set bits will be eliminated by
685     // the post shift on the following instruction.
686     int shift_high = CountLeadingZeros(imm, reg_size);
687     int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
688 
689     if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
690       // The new immediate has been moved into the destination's low bits:
691       // return a new leftward-shifting operand.
692       return Operand(dst, LSL, shift_low);
693     } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
694       // The new immediate has been moved into the destination's high bits:
695       // return a new rightward-shifting operand.
696       return Operand(dst, LSR, shift_high);
697     } else {
698       // Use the generic move operation to set up the immediate.
699       Mov(dst, imm);
700     }
701   }
702   return Operand(dst);
703 }
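// Editorial note (illustrative): MoveImmediateForShiftedOp(x16, 0x12348000,
// kAnyShift) cannot encode the value in one instruction, but the low pre-shift
// yields imm_low == 0x2469 (movz-encodable), so it emits 'movz x16, #0x2469'
// and returns Operand(x16, LSL, 15) for the caller to fold into its own
// instruction. (x16 stands in for whatever scratch register the caller acquired.)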
704 
705 void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
706                                  const Operand& operand, FlagsUpdate S,
707                                  AddSubOp op) {
708   if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
709       !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
710     // The instruction would be a nop. Avoid generating useless code.
711     return;
712   }
713 
714   if (operand.NeedsRelocation(this)) {
715     UseScratchRegisterScope temps(this);
716     Register temp = temps.AcquireX();
717     Ldr(temp, operand.immediate());
718     AddSubMacro(rd, rn, temp, S, op);
719   } else if ((operand.IsImmediate() &&
720               !IsImmAddSub(operand.ImmediateValue()))      ||
721              (rn.IsZero() && !operand.IsShiftedRegister()) ||
722              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
723     UseScratchRegisterScope temps(this);
724     Register temp = temps.AcquireSameSizeAs(rn);
725     if (operand.IsImmediate()) {
726       PreShiftImmMode mode = kAnyShift;
727 
728       // If the destination or source register is the stack pointer, we can
729       // only pre-shift the immediate right by values supported in the add/sub
730       // extend encoding.
731       if (rd.Is(sp)) {
732         // If the destination is SP and flags will be set, we can't pre-shift
733         // the immediate at all.
734         mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
735       } else if (rn.Is(sp)) {
736         mode = kLimitShiftForSP;
737       }
738 
739       Operand imm_operand =
740           MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
741       AddSub(rd, rn, imm_operand, S, op);
742     } else {
743       Mov(temp, operand);
744       AddSub(rd, rn, temp, S, op);
745     }
746   } else {
747     AddSub(rd, rn, operand, S, op);
748   }
749 }
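// Editorial note (illustrative): Add(x0, x1, 0x12348000) exceeds the 12-bit
// (optionally LSL #12) add/sub immediate, so AddSubMacro materializes the
// constant via MoveImmediateForShiftedOp and typically emits
// 'movz x16, #0x2469' followed by 'add x0, x1, x16, lsl #15'.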
750 
751 void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
752                                           const Register& rn,
753                                           const Operand& operand, FlagsUpdate S,
754                                           AddSubWithCarryOp op) {
755   DCHECK(rd.SizeInBits() == rn.SizeInBits());
756   UseScratchRegisterScope temps(this);
757 
758   if (operand.NeedsRelocation(this)) {
759     Register temp = temps.AcquireX();
760     Ldr(temp, operand.immediate());
761     AddSubWithCarryMacro(rd, rn, temp, S, op);
762 
763   } else if (operand.IsImmediate() ||
764              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
765     // Add/sub with carry (immediate or ROR shifted register.)
766     Register temp = temps.AcquireSameSizeAs(rn);
767     Mov(temp, operand);
768     AddSubWithCarry(rd, rn, temp, S, op);
769 
770   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
771     // Add/sub with carry (shifted register).
772     DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
773     DCHECK(operand.shift() != ROR);
774     DCHECK(is_uintn(operand.shift_amount(),
775           rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
776                                              : kWRegSizeInBitsLog2));
777     Register temp = temps.AcquireSameSizeAs(rn);
778     EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
779     AddSubWithCarry(rd, rn, temp, S, op);
780 
781   } else if (operand.IsExtendedRegister()) {
782     // Add/sub with carry (extended register).
783     DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
784     // Add/sub extended supports a shift <= 4. We want to support exactly the
785     // same modes.
786     DCHECK_LE(operand.shift_amount(), 4);
787     DCHECK(operand.reg().Is64Bits() ||
788            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
789     Register temp = temps.AcquireSameSizeAs(rn);
790     EmitExtendShift(temp, operand.reg(), operand.extend(),
791                     operand.shift_amount());
792     AddSubWithCarry(rd, rn, temp, S, op);
793 
794   } else {
795     // The addressing mode is directly supported by the instruction.
796     AddSubWithCarry(rd, rn, operand, S, op);
797   }
798 }
799 
800 void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
801                                     const MemOperand& addr, LoadStoreOp op) {
802   int64_t offset = addr.offset();
803   unsigned size = CalcLSDataSize(op);
804 
805   // Check if an immediate offset fits in the immediate field of the
806   // appropriate instruction. If not, emit two instructions to perform
807   // the operation.
808   if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
809       !IsImmLSUnscaled(offset)) {
810     // Immediate offset that can't be encoded using unsigned or unscaled
811     // addressing modes.
812     UseScratchRegisterScope temps(this);
813     Register temp = temps.AcquireSameSizeAs(addr.base());
814     Mov(temp, addr.offset());
815     LoadStore(rt, MemOperand(addr.base(), temp), op);
816   } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
817     // Post-index beyond unscaled addressing range.
818     LoadStore(rt, MemOperand(addr.base()), op);
819     add(addr.base(), addr.base(), offset);
820   } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
821     // Pre-index beyond unscaled addressing range.
822     add(addr.base(), addr.base(), offset);
823     LoadStore(rt, MemOperand(addr.base()), op);
824   } else {
825     // Encodable in one load/store instruction.
826     LoadStore(rt, addr, op);
827   }
828 }
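// Editorial note (illustrative): Ldr(x0, MemOperand(x1, 0x12345)) has an offset
// that fits neither the scaled unsigned nor the unscaled signed immediate
// field, so the macro moves the offset into a scratch register and emits a
// register-offset load instead.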
829 
830 void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
831                                         const CPURegister& rt2,
832                                         const MemOperand& addr,
833                                         LoadStorePairOp op) {
834   // TODO(all): Should we support register offset for load-store-pair?
835   DCHECK(!addr.IsRegisterOffset());
836 
837   int64_t offset = addr.offset();
838   unsigned size = CalcLSPairDataSize(op);
839 
840   // Check if the offset fits in the immediate field of the appropriate
841   // instruction. If not, emit two instructions to perform the operation.
842   if (IsImmLSPair(offset, size)) {
843     // Encodable in one load/store pair instruction.
844     LoadStorePair(rt, rt2, addr, op);
845   } else {
846     Register base = addr.base();
847     if (addr.IsImmediateOffset()) {
848       UseScratchRegisterScope temps(this);
849       Register temp = temps.AcquireSameSizeAs(base);
850       Add(temp, base, offset);
851       LoadStorePair(rt, rt2, MemOperand(temp), op);
852     } else if (addr.IsPostIndex()) {
853       LoadStorePair(rt, rt2, MemOperand(base), op);
854       Add(base, base, offset);
855     } else {
856       DCHECK(addr.IsPreIndex());
857       Add(base, base, offset);
858       LoadStorePair(rt, rt2, MemOperand(base), op);
859     }
860   }
861 }
862 
863 bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
864     Label* label, ImmBranchType b_type) {
865   bool need_longer_range = false;
866   // There are two situations in which we care about the offset being out of
867   // range:
868   //  - The label is bound but too far away.
869   //  - The label is not bound but linked, and the previous branch
870   //    instruction in the chain is too far away.
871   if (label->is_bound() || label->is_linked()) {
872     need_longer_range =
873       !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
874   }
875   if (!need_longer_range && !label->is_bound()) {
876     int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
877     unresolved_branches_.insert(
878         std::pair<int, FarBranchInfo>(max_reachable_pc,
879                                       FarBranchInfo(pc_offset(), label)));
880     // Also maintain the next pool check.
881     next_veneer_pool_check_ =
882       Min(next_veneer_pool_check_,
883           max_reachable_pc - kVeneerDistanceCheckMargin);
884   }
885   return need_longer_range;
886 }
887 
888 void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
889   DCHECK(allow_macro_instructions());
890   DCHECK(!rd.IsZero());
891 
892   if (hint == kAdrNear) {
893     adr(rd, label);
894     return;
895   }
896 
897   DCHECK_EQ(hint, kAdrFar);
898   if (label->is_bound()) {
899     int label_offset = label->pos() - pc_offset();
900     if (Instruction::IsValidPCRelOffset(label_offset)) {
901       adr(rd, label);
902     } else {
903       DCHECK_LE(label_offset, 0);
904       int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
905       adr(rd, min_adr_offset);
906       Add(rd, rd, label_offset - min_adr_offset);
907     }
908   } else {
909     UseScratchRegisterScope temps(this);
910     Register scratch = temps.AcquireX();
911 
912     InstructionAccurateScope scope(
913         this, PatchingAssembler::kAdrFarPatchableNInstrs);
914     adr(rd, label);
915     for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
916       nop(ADR_FAR_NOP);
917     }
918     movz(scratch, 0);
919   }
920 }
921 
922 void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
923   DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
924          (bit == -1 || type >= kBranchTypeFirstUsingBit));
925   if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
926     B(static_cast<Condition>(type), label);
927   } else {
928     switch (type) {
929       case always:        B(label);              break;
930       case never:         break;
931       case reg_zero:      Cbz(reg, label);       break;
932       case reg_not_zero:  Cbnz(reg, label);      break;
933       case reg_bit_clear: Tbz(reg, bit, label);  break;
934       case reg_bit_set:   Tbnz(reg, bit, label); break;
935       default:
936         UNREACHABLE();
937     }
938   }
939 }
940 
941 void TurboAssembler::B(Label* label, Condition cond) {
942   DCHECK(allow_macro_instructions());
943   DCHECK((cond != al) && (cond != nv));
944 
945   Label done;
946   bool need_extra_instructions =
947     NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
948 
949   if (need_extra_instructions) {
950     b(&done, NegateCondition(cond));
951     B(label);
952   } else {
953     b(label, cond);
954   }
955   bind(&done);
956 }
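// Editorial note (illustrative): when 'label' might lie outside the +/-1MB
// range of a conditional branch, B(label, eq) is emitted as the inverted short
// branch around an unconditional one:
//   b.ne done
//   b    label
//  done: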
957 
958 void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
959   DCHECK(allow_macro_instructions());
960 
961   Label done;
962   bool need_extra_instructions =
963     NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
964 
965   if (need_extra_instructions) {
966     tbz(rt, bit_pos, &done);
967     B(label);
968   } else {
969     tbnz(rt, bit_pos, label);
970   }
971   bind(&done);
972 }
973 
974 void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
975   DCHECK(allow_macro_instructions());
976 
977   Label done;
978   bool need_extra_instructions =
979     NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
980 
981   if (need_extra_instructions) {
982     tbnz(rt, bit_pos, &done);
983     B(label);
984   } else {
985     tbz(rt, bit_pos, label);
986   }
987   bind(&done);
988 }
989 
990 void TurboAssembler::Cbnz(const Register& rt, Label* label) {
991   DCHECK(allow_macro_instructions());
992 
993   Label done;
994   bool need_extra_instructions =
995     NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
996 
997   if (need_extra_instructions) {
998     cbz(rt, &done);
999     B(label);
1000   } else {
1001     cbnz(rt, label);
1002   }
1003   bind(&done);
1004 }
1005 
1006 void TurboAssembler::Cbz(const Register& rt, Label* label) {
1007   DCHECK(allow_macro_instructions());
1008 
1009   Label done;
1010   bool need_extra_instructions =
1011     NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
1012 
1013   if (need_extra_instructions) {
1014     cbnz(rt, &done);
1015     B(label);
1016   } else {
1017     cbz(rt, label);
1018   }
1019   bind(&done);
1020 }
1021 
1022 
1023 // Pseudo-instructions.
1024 
1025 void TurboAssembler::Abs(const Register& rd, const Register& rm,
1026                          Label* is_not_representable, Label* is_representable) {
1027   DCHECK(allow_macro_instructions());
1028   DCHECK(AreSameSizeAndType(rd, rm));
1029 
1030   Cmp(rm, 1);
1031   Cneg(rd, rm, lt);
1032 
1033   // If the comparison sets the v flag, the input was the smallest value
1034   // representable by rm, and the mathematical result of abs(rm) is not
1035   // representable using two's complement.
1036   if ((is_not_representable != nullptr) && (is_representable != nullptr)) {
1037     B(is_not_representable, vs);
1038     B(is_representable);
1039   } else if (is_not_representable != nullptr) {
1040     B(is_not_representable, vs);
1041   } else if (is_representable != nullptr) {
1042     B(is_representable, vc);
1043   }
1044 }
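// Editorial note (illustrative): Abs(x0, x1, &not_representable) negates x1
// into x0 when it is negative and branches to the hypothetical label
// 'not_representable' only for x1 == INT64_MIN, the single input whose
// absolute value overflows two's complement.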
1045 
1046 
1047 // Abstracted stack operations.
1048 
1049 void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
1050                           const CPURegister& src2, const CPURegister& src3) {
1051   DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1052 
1053   int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
1054   int size = src0.SizeInBytes();
1055   DCHECK_EQ(0, (size * count) % 16);
1056 
1057   PushHelper(count, size, src0, src1, src2, src3);
1058 }
1059 
1060 void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
1061                           const CPURegister& src2, const CPURegister& src3,
1062                           const CPURegister& src4, const CPURegister& src5,
1063                           const CPURegister& src6, const CPURegister& src7) {
1064   DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
1065 
1066   int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
1067   int size = src0.SizeInBytes();
1068   DCHECK_EQ(0, (size * count) % 16);
1069 
1070   PushHelper(4, size, src0, src1, src2, src3);
1071   PushHelper(count - 4, size, src4, src5, src6, src7);
1072 }
1073 
1074 void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
1075                          const CPURegister& dst2, const CPURegister& dst3) {
1076   // It is not valid to pop into the same register more than once in one
1077   // instruction, not even into the zero register.
1078   DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
1079   DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1080   DCHECK(dst0.IsValid());
1081 
1082   int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
1083   int size = dst0.SizeInBytes();
1084   DCHECK_EQ(0, (size * count) % 16);
1085 
1086   PopHelper(count, size, dst0, dst1, dst2, dst3);
1087 }
1088 
1089 void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
1090                          const CPURegister& dst2, const CPURegister& dst3,
1091                          const CPURegister& dst4, const CPURegister& dst5,
1092                          const CPURegister& dst6, const CPURegister& dst7) {
1093   // It is not valid to pop into the same register more than once in one
1094   // instruction, not even into the zero register.
1095   DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1096   DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1097   DCHECK(dst0.IsValid());
1098 
1099   int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
1100   int size = dst0.SizeInBytes();
1101   DCHECK_EQ(0, (size * count) % 16);
1102 
1103   PopHelper(4, size, dst0, dst1, dst2, dst3);
1104   PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
1105 }
1106 
1107 void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
1108   int size = src0.SizeInBytes() + src1.SizeInBytes();
1109   DCHECK_EQ(0, size % 16);
1110 
1111   // Reserve room for src0 and push src1.
1112   str(src1, MemOperand(sp, -size, PreIndex));
1113   // Fill the gap with src0.
1114   str(src0, MemOperand(sp, src1.SizeInBytes()));
1115 }
1116 
1117 void MacroAssembler::PushPopQueue::PushQueued() {
1118   DCHECK_EQ(0, size_ % 16);
1119   if (queued_.empty()) return;
1120 
1121   size_t count = queued_.size();
1122   size_t index = 0;
1123   while (index < count) {
1124     // PushHelper can only handle registers with the same size and type, and it
1125     // can handle only four at a time. Batch them up accordingly.
1126     CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
1127     int batch_index = 0;
1128     do {
1129       batch[batch_index++] = queued_[index++];
1130     } while ((batch_index < 4) && (index < count) &&
1131              batch[0].IsSameSizeAndType(queued_[index]));
1132 
1133     masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
1134                       batch[0], batch[1], batch[2], batch[3]);
1135   }
1136 
1137   queued_.clear();
1138 }
1139 
1140 
1141 void MacroAssembler::PushPopQueue::PopQueued() {
1142   DCHECK_EQ(0, size_ % 16);
1143   if (queued_.empty()) return;
1144 
1145   size_t count = queued_.size();
1146   size_t index = 0;
1147   while (index < count) {
1148     // PopHelper can only handle registers with the same size and type, and it
1149     // can handle only four at a time. Batch them up accordingly.
1150     CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
1151     int batch_index = 0;
1152     do {
1153       batch[batch_index++] = queued_[index++];
1154     } while ((batch_index < 4) && (index < count) &&
1155              batch[0].IsSameSizeAndType(queued_[index]));
1156 
1157     masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
1158                      batch[0], batch[1], batch[2], batch[3]);
1159   }
1160 
1161   queued_.clear();
1162 }
1163 
1164 void TurboAssembler::PushCPURegList(CPURegList registers) {
1165   int size = registers.RegisterSizeInBytes();
1166   DCHECK_EQ(0, (size * registers.Count()) % 16);
1167 
1168   // Push up to four registers at a time.
1169   while (!registers.IsEmpty()) {
1170     int count_before = registers.Count();
1171     const CPURegister& src0 = registers.PopHighestIndex();
1172     const CPURegister& src1 = registers.PopHighestIndex();
1173     const CPURegister& src2 = registers.PopHighestIndex();
1174     const CPURegister& src3 = registers.PopHighestIndex();
1175     int count = count_before - registers.Count();
1176     PushHelper(count, size, src0, src1, src2, src3);
1177   }
1178 }
1179 
1180 void TurboAssembler::PopCPURegList(CPURegList registers) {
1181   int size = registers.RegisterSizeInBytes();
1182   DCHECK_EQ(0, (size * registers.Count()) % 16);
1183 
1184   // Pop up to four registers at a time.
1185   while (!registers.IsEmpty()) {
1186     int count_before = registers.Count();
1187     const CPURegister& dst0 = registers.PopLowestIndex();
1188     const CPURegister& dst1 = registers.PopLowestIndex();
1189     const CPURegister& dst2 = registers.PopLowestIndex();
1190     const CPURegister& dst3 = registers.PopLowestIndex();
1191     int count = count_before - registers.Count();
1192     PopHelper(count, size, dst0, dst1, dst2, dst3);
1193   }
1194 }
1195 
1196 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1197   UseScratchRegisterScope temps(this);
1198   Register temp = temps.AcquireSameSizeAs(count);
1199 
1200   if (FLAG_optimize_for_size) {
1201     Label loop, done;
1202 
1203     Subs(temp, count, 1);
1204     B(mi, &done);
1205 
1206     // Push all registers individually, to save code size.
1207     Bind(&loop);
1208     Subs(temp, temp, 1);
1209     PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1210     B(pl, &loop);
1211 
1212     Bind(&done);
1213   } else {
1214     Label loop, leftover2, leftover1, done;
1215 
1216     Subs(temp, count, 4);
1217     B(mi, &leftover2);
1218 
1219     // Push groups of four first.
1220     Bind(&loop);
1221     Subs(temp, temp, 4);
1222     PushHelper(4, src.SizeInBytes(), src, src, src, src);
1223     B(pl, &loop);
1224 
1225     // Push groups of two.
1226     Bind(&leftover2);
1227     Tbz(count, 1, &leftover1);
1228     PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1229 
1230     // Push the last one (if required).
1231     Bind(&leftover1);
1232     Tbz(count, 0, &done);
1233     PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1234 
1235     Bind(&done);
1236   }
1237 }
1238 
1239 void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
1240                                 const CPURegister& src1,
1241                                 const CPURegister& src2,
1242                                 const CPURegister& src3) {
1243   // Ensure that we don't unintentionally modify scratch or debug registers.
1244   InstructionAccurateScope scope(this);
1245 
1246   DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1247   DCHECK(size == src0.SizeInBytes());
1248 
1249   // When pushing multiple registers, the store order is chosen such that
1250   // Push(a, b) is equivalent to Push(a) followed by Push(b).
1251   switch (count) {
1252     case 1:
1253       DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1254       str(src0, MemOperand(sp, -1 * size, PreIndex));
1255       break;
1256     case 2:
1257       DCHECK(src2.IsNone() && src3.IsNone());
1258       stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
1259       break;
1260     case 3:
1261       DCHECK(src3.IsNone());
1262       stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
1263       str(src0, MemOperand(sp, 2 * size));
1264       break;
1265     case 4:
1266       // Skip over 4 * size, then fill in the gap. This allows four W registers
1267       // to be pushed using sp, whilst maintaining 16-byte alignment for sp
1268       // at all times.
1269       stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
1270       stp(src1, src0, MemOperand(sp, 2 * size));
1271       break;
1272     default:
1273       UNREACHABLE();
1274   }
1275 }
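// Editorial note (illustrative): Push(w0, w1, w2, w3) reaches the four-register
// case above and expands to
//   stp w3, w2, [sp, #-16]!
//   stp w1, w0, [sp, #8]
// leaving w0 at the highest address, exactly as if each register had been
// pushed individually.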
1276 
1277 void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
1278                                const CPURegister& dst1, const CPURegister& dst2,
1279                                const CPURegister& dst3) {
1281   // Ensure that we don't unintentionally modify scratch or debug registers.
1281   InstructionAccurateScope scope(this);
1282 
1283   DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1284   DCHECK(size == dst0.SizeInBytes());
1285 
1286   // When popping multiple registers, the load order is chosen such that
1287   // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1288   switch (count) {
1289     case 1:
1290       DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1291       ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
1292       break;
1293     case 2:
1294       DCHECK(dst2.IsNone() && dst3.IsNone());
1295       ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
1296       break;
1297     case 3:
1298       DCHECK(dst3.IsNone());
1299       ldr(dst2, MemOperand(sp, 2 * size));
1300       ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
1301       break;
1302     case 4:
1303       // Load the higher addresses first, then load the lower addresses and
1304       // skip the whole block in the second instruction. This allows four W
1305       // registers to be popped using sp, whilst maintaining 16-byte alignment
1306       // for sp at all times.
1307       ldp(dst2, dst3, MemOperand(sp, 2 * size));
1308       ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
1309       break;
1310     default:
1311       UNREACHABLE();
1312   }
1313 }
1314 
1315 void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
1316   if (offset.IsImmediate()) {
1317     DCHECK_GE(offset.ImmediateValue(), 0);
1318   } else if (emit_debug_code()) {
1319     Cmp(xzr, offset);
1320     Check(le, AbortReason::kStackAccessBelowStackPointer);
1321   }
1322 
1323   Str(src, MemOperand(sp, offset));
1324 }
1325 
1326 void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1327   if (offset.IsImmediate()) {
1328     DCHECK_GE(offset.ImmediateValue(), 0);
1329   } else if (emit_debug_code()) {
1330     Cmp(xzr, offset);
1331     Check(le, AbortReason::kStackAccessBelowStackPointer);
1332   }
1333 
1334   Ldr(dst, MemOperand(sp, offset));
1335 }
1336 
1337 void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
1338                               int offset) {
1339   DCHECK(AreSameSizeAndType(src1, src2));
1340   DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1341   Stp(src1, src2, MemOperand(sp, offset));
1342 }
1343 
1344 
1345 void MacroAssembler::PeekPair(const CPURegister& dst1,
1346                               const CPURegister& dst2,
1347                               int offset) {
1348   DCHECK(AreSameSizeAndType(dst1, dst2));
1349   DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1350   Ldp(dst1, dst2, MemOperand(sp, offset));
1351 }
1352 
1353 
1354 void MacroAssembler::PushCalleeSavedRegisters() {
1355   // Ensure that the macro-assembler doesn't use any scratch registers.
1356   InstructionAccurateScope scope(this);
1357 
1358   MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
1359 
1360   stp(d14, d15, tos);
1361   stp(d12, d13, tos);
1362   stp(d10, d11, tos);
1363   stp(d8, d9, tos);
1364 
1365   stp(x29, x30, tos);
1366   stp(x27, x28, tos);
1367   stp(x25, x26, tos);
1368   stp(x23, x24, tos);
1369   stp(x21, x22, tos);
1370   stp(x19, x20, tos);
1371 }
1372 
1373 
1374 void MacroAssembler::PopCalleeSavedRegisters() {
1375   // Ensure that the macro-assembler doesn't use any scratch registers.
1376   InstructionAccurateScope scope(this);
1377 
1378   MemOperand tos(sp, 2 * kXRegSize, PostIndex);
1379 
1380   ldp(x19, x20, tos);
1381   ldp(x21, x22, tos);
1382   ldp(x23, x24, tos);
1383   ldp(x25, x26, tos);
1384   ldp(x27, x28, tos);
1385   ldp(x29, x30, tos);
1386 
1387   ldp(d8, d9, tos);
1388   ldp(d10, d11, tos);
1389   ldp(d12, d13, tos);
1390   ldp(d14, d15, tos);
1391 }
1392 
1393 void TurboAssembler::AssertSpAligned() {
1394   if (emit_debug_code()) {
1395     HardAbortScope hard_abort(this);  // Avoid calls to Abort.
1396     // Arm64 requires the stack pointer to be 16-byte aligned prior to address
1397     // calculation.
1398     UseScratchRegisterScope scope(this);
1399     Register temp = scope.AcquireX();
1400     Mov(temp, sp);
1401     Tst(temp, 15);
1402     Check(eq, AbortReason::kUnexpectedStackPointer);
1403   }
1404 }
1405 
CopySlots(int dst,Register src,Register slot_count)1406 void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
1407   DCHECK(!src.IsZero());
1408   UseScratchRegisterScope scope(this);
1409   Register dst_reg = scope.AcquireX();
1410   SlotAddress(dst_reg, dst);
1411   SlotAddress(src, src);
1412   CopyDoubleWords(dst_reg, src, slot_count);
1413 }
1414 
CopySlots(Register dst,Register src,Register slot_count)1415 void TurboAssembler::CopySlots(Register dst, Register src,
1416                                Register slot_count) {
1417   DCHECK(!dst.IsZero() && !src.IsZero());
1418   SlotAddress(dst, dst);
1419   SlotAddress(src, src);
1420   CopyDoubleWords(dst, src, slot_count);
1421 }
1422 
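// CopyDoubleWords copies |count| pointer-sized words between two stack
// regions. An odd leading word is copied first, then the remainder is moved
// in ldp/stp pairs; the copy runs forwards or backwards depending on |mode|
// so that overlapping regions are handled correctly. The count and both
// pointer registers are clobbered.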
CopyDoubleWords(Register dst,Register src,Register count,CopyDoubleWordsMode mode)1423 void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
1424                                      CopyDoubleWordsMode mode) {
1425   DCHECK(!AreAliased(dst, src, count));
1426 
1427   if (emit_debug_code()) {
1428     Register pointer1 = dst;
1429     Register pointer2 = src;
1430     if (mode == kSrcLessThanDst) {
1431       pointer1 = src;
1432       pointer2 = dst;
1433     }
1434     // Copy requires pointer1 < pointer2 || (pointer1 - pointer2) >= count.
1435     Label pointer1_below_pointer2;
1436     Subs(pointer1, pointer1, pointer2);
1437     B(lt, &pointer1_below_pointer2);
1438     Cmp(pointer1, count);
1439     Check(ge, AbortReason::kOffsetOutOfRange);
1440     Bind(&pointer1_below_pointer2);
1441     Add(pointer1, pointer1, pointer2);
1442   }
1443   static_assert(kPointerSize == kDRegSize,
1444                 "pointers must be the same size as doubles");
1445 
1446   int direction = (mode == kDstLessThanSrc) ? 1 : -1;
1447   UseScratchRegisterScope scope(this);
1448   VRegister temp0 = scope.AcquireD();
1449   VRegister temp1 = scope.AcquireD();
1450 
1451   Label pairs, loop, done;
1452 
1453   Tbz(count, 0, &pairs);
1454   Ldr(temp0, MemOperand(src, direction * kPointerSize, PostIndex));
1455   Sub(count, count, 1);
1456   Str(temp0, MemOperand(dst, direction * kPointerSize, PostIndex));
1457 
1458   Bind(&pairs);
1459   if (mode == kSrcLessThanDst) {
1460     // Adjust pointers for post-index ldp/stp with negative offset:
1461     Sub(dst, dst, kPointerSize);
1462     Sub(src, src, kPointerSize);
1463   }
1464   Bind(&loop);
1465   Cbz(count, &done);
1466   Ldp(temp0, temp1, MemOperand(src, 2 * direction * kPointerSize, PostIndex));
1467   Sub(count, count, 2);
1468   Stp(temp0, temp1, MemOperand(dst, 2 * direction * kPointerSize, PostIndex));
1469   B(&loop);
1470 
1471   // TODO(all): large copies may benefit from using temporary Q registers
1472   // to copy four double words per iteration.
1473 
1474   Bind(&done);
1475 }
1476 
SlotAddress(Register dst,int slot_offset)1477 void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
1478   Add(dst, sp, slot_offset << kPointerSizeLog2);
1479 }
1480 
SlotAddress(Register dst,Register slot_offset)1481 void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
1482   Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
1483 }
1484 
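// In debug code, verify that FPCR still holds its default configuration:
// flush-to-zero disabled and round-to-nearest (ties to even) selected.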
AssertFPCRState(Register fpcr)1485 void TurboAssembler::AssertFPCRState(Register fpcr) {
1486   if (emit_debug_code()) {
1487     Label unexpected_mode, done;
1488     UseScratchRegisterScope temps(this);
1489     if (fpcr.IsNone()) {
1490       fpcr = temps.AcquireX();
1491       Mrs(fpcr, FPCR);
1492     }
1493 
1494     // Settings left to their default values:
1495     //   - Assert that flush-to-zero is not set.
1496     Tbnz(fpcr, FZ_offset, &unexpected_mode);
1497     //   - Assert that the rounding mode is nearest-with-ties-to-even.
1498     STATIC_ASSERT(FPTieEven == 0);
1499     Tst(fpcr, RMode_mask);
1500     B(eq, &done);
1501 
1502     Bind(&unexpected_mode);
1503     Abort(AbortReason::kUnexpectedFPCRMode);
1504 
1505     Bind(&done);
1506   }
1507 }
1508 
CanonicalizeNaN(const VRegister & dst,const VRegister & src)1509 void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
1510                                      const VRegister& src) {
1511   AssertFPCRState();
1512 
1513   // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1514   // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
1515   // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1516   Fsub(dst, src, fp_zero);
1517 }
1518 
LoadRoot(Register destination,Heap::RootListIndex index)1519 void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
1520   // TODO(jbramley): Most root values are constants, and can be synthesized
1521   // without a load. Refer to the ARM back end for details.
1522   Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
1523 }
1524 
1525 
LoadObject(Register result,Handle<Object> object)1526 void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
1527   AllowDeferredHandleDereference heap_object_check;
1528   if (object->IsHeapObject()) {
1529     Mov(result, Handle<HeapObject>::cast(object));
1530   } else {
1531     Mov(result, Operand(Smi::cast(*object)));
1532   }
1533 }
1534 
Move(Register dst,Smi * src)1535 void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
1536 
Swap(Register lhs,Register rhs)1537 void TurboAssembler::Swap(Register lhs, Register rhs) {
1538   DCHECK(lhs.IsSameSizeAndType(rhs));
1539   DCHECK(!lhs.Is(rhs));
1540   UseScratchRegisterScope temps(this);
1541   Register temp = temps.AcquireX();
1542   Mov(temp, rhs);
1543   Mov(rhs, lhs);
1544   Mov(lhs, temp);
1545 }
1546 
Swap(VRegister lhs,VRegister rhs)1547 void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
1548   DCHECK(lhs.IsSameSizeAndType(rhs));
1549   DCHECK(!lhs.Is(rhs));
1550   UseScratchRegisterScope temps(this);
1551   VRegister temp = VRegister::no_reg();
1552   if (lhs.IsS()) {
1553     temp = temps.AcquireS();
1554   } else if (lhs.IsD()) {
1555     temp = temps.AcquireD();
1556   } else {
1557     DCHECK(lhs.IsQ());
1558     temp = temps.AcquireQ();
1559   }
1560   Mov(temp, rhs);
1561   Mov(rhs, lhs);
1562   Mov(lhs, temp);
1563 }
1564 
AssertSmi(Register object,AbortReason reason)1565 void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
1566   if (emit_debug_code()) {
1567     STATIC_ASSERT(kSmiTag == 0);
1568     Tst(object, kSmiTagMask);
1569     Check(eq, reason);
1570   }
1571 }
1572 
AssertNotSmi(Register object,AbortReason reason)1573 void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
1574   if (emit_debug_code()) {
1575     STATIC_ASSERT(kSmiTag == 0);
1576     Tst(object, kSmiTagMask);
1577     Check(ne, reason);
1578   }
1579 }
1580 
AssertConstructor(Register object)1581 void MacroAssembler::AssertConstructor(Register object) {
1582   if (emit_debug_code()) {
1583     AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
1584 
1585     UseScratchRegisterScope temps(this);
1586     Register temp = temps.AcquireX();
1587 
1588     Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1589     Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1590     Tst(temp, Operand(Map::IsConstructorBit::kMask));
1591 
1592     Check(ne, AbortReason::kOperandIsNotAConstructor);
1593   }
1594 }
1595 
AssertFunction(Register object)1596 void MacroAssembler::AssertFunction(Register object) {
1597   if (emit_debug_code()) {
1598     AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
1599 
1600     UseScratchRegisterScope temps(this);
1601     Register temp = temps.AcquireX();
1602 
1603     CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
1604     Check(eq, AbortReason::kOperandIsNotAFunction);
1605   }
1606 }
1607 
1608 
AssertBoundFunction(Register object)1609 void MacroAssembler::AssertBoundFunction(Register object) {
1610   if (emit_debug_code()) {
1611     AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
1612 
1613     UseScratchRegisterScope temps(this);
1614     Register temp = temps.AcquireX();
1615 
1616     CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1617     Check(eq, AbortReason::kOperandIsNotABoundFunction);
1618   }
1619 }
1620 
AssertGeneratorObject(Register object)1621 void MacroAssembler::AssertGeneratorObject(Register object) {
1622   if (!emit_debug_code()) return;
1623   AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
1624 
1625   // Load map
1626   UseScratchRegisterScope temps(this);
1627   Register temp = temps.AcquireX();
1628   Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1629 
1630   Label do_check;
1631   // Load instance type and check if JSGeneratorObject
1632   CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
1633   B(eq, &do_check);
1634 
1635   // Check if JSAsyncGeneratorObject
1636   Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
1637 
1638   bind(&do_check);
1639   // Assert based on the instance type comparison above.

1640   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1641 }
1642 
AssertUndefinedOrAllocationSite(Register object)1643 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
1644   if (emit_debug_code()) {
1645     UseScratchRegisterScope temps(this);
1646     Register scratch = temps.AcquireX();
1647     Label done_checking;
1648     AssertNotSmi(object);
1649     JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1650     Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1651     CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1652     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1653     Bind(&done_checking);
1654   }
1655 }
1656 
AssertPositiveOrZero(Register value)1657 void TurboAssembler::AssertPositiveOrZero(Register value) {
1658   if (emit_debug_code()) {
1659     Label done;
1660     int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1661     Tbz(value, sign_bit, &done);
1662     Abort(AbortReason::kUnexpectedNegativeValue);
1663     Bind(&done);
1664   }
1665 }
1666 
CallStubDelayed(CodeStub * stub)1667 void TurboAssembler::CallStubDelayed(CodeStub* stub) {
1668   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1669   BlockPoolsScope scope(this);
1670 #ifdef DEBUG
1671   Label start;
1672   Bind(&start);
1673 #endif
1674   Operand operand = Operand::EmbeddedCode(stub);
1675   near_call(operand.heap_object_request());
1676   DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
1677 }
1678 
CallStub(CodeStub * stub)1679 void MacroAssembler::CallStub(CodeStub* stub) {
1680   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1681   Call(stub->GetCode(), RelocInfo::CODE_TARGET);
1682 }
1683 
TailCallStub(CodeStub * stub)1684 void MacroAssembler::TailCallStub(CodeStub* stub) {
1685   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1686 }
1687 
CallRuntimeWithCEntry(Runtime::FunctionId fid,Register centry)1688 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1689                                            Register centry) {
1690   const Runtime::Function* f = Runtime::FunctionForId(fid);
1691   // TODO(1236192): Most runtime routines don't need the number of
1692   // arguments passed in because it is constant. At some point we
1693   // should remove this need and make the runtime routine entry code
1694   // smarter.
1695   Mov(x0, f->nargs);
1696   Mov(x1, ExternalReference::Create(f));
1697   DCHECK(!AreAliased(centry, x0, x1));
1698   Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
1699   Call(centry);
1700 }
1701 
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)1702 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1703                                  int num_arguments,
1704                                  SaveFPRegsMode save_doubles) {
1705   // All arguments must be on the stack before this function is called.
1706   // x0 holds the return value after the call.
1707 
1708   // Check that the number of arguments matches what the function expects.
1709   // If f->nargs is -1, the function can accept a variable number of arguments.
1710   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1711 
1712   // Place the necessary arguments.
1713   Mov(x0, num_arguments);
1714   Mov(x1, ExternalReference::Create(f));
1715 
1716   Handle<Code> code =
1717       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1718   Call(code, RelocInfo::CODE_TARGET);
1719 }
1720 
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)1721 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1722                                              bool builtin_exit_frame) {
1723   Mov(x1, builtin);
1724   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1725                                           kArgvOnStack, builtin_exit_frame);
1726   Jump(code, RelocInfo::CODE_TARGET);
1727 }
1728 
JumpToInstructionStream(Address entry)1729 void MacroAssembler::JumpToInstructionStream(Address entry) {
1730   Mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1731   Br(kOffHeapTrampolineRegister);
1732 }
1733 
TailCallRuntime(Runtime::FunctionId fid)1734 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1735   const Runtime::Function* function = Runtime::FunctionForId(fid);
1736   DCHECK_EQ(1, function->result_size);
1737   if (function->nargs >= 0) {
1738     // TODO(1236192): Most runtime routines don't need the number of
1739     // arguments passed in because it is constant. At some point we
1740     // should remove this need and make the runtime routine entry code
1741     // smarter.
1742     Mov(x0, function->nargs);
1743   }
1744   JumpToExternalReference(ExternalReference::Create(fid));
1745 }
1746 
ActivationFrameAlignment()1747 int TurboAssembler::ActivationFrameAlignment() {
1748 #if V8_HOST_ARCH_ARM64
1749   // Running on the real platform. Use the alignment as mandated by the local
1750   // environment.
1751   // Note: This will break if we ever start generating snapshots on one ARM
1752   // platform for another ARM platform with a different alignment.
1753   return base::OS::ActivationFrameAlignment();
1754 #else  // V8_HOST_ARCH_ARM64
1755   // If we are using the simulator then we should always align to the expected
1756   // alignment. As the simulator is used to generate snapshots we do not know
1757   // if the target platform will need alignment, so this is controlled from a
1758   // flag.
1759   return FLAG_sim_stack_alignment;
1760 #endif  // V8_HOST_ARCH_ARM64
1761 }
1762 
CallCFunction(ExternalReference function,int num_of_reg_args)1763 void TurboAssembler::CallCFunction(ExternalReference function,
1764                                    int num_of_reg_args) {
1765   CallCFunction(function, num_of_reg_args, 0);
1766 }
1767 
CallCFunction(ExternalReference function,int num_of_reg_args,int num_of_double_args)1768 void TurboAssembler::CallCFunction(ExternalReference function,
1769                                    int num_of_reg_args,
1770                                    int num_of_double_args) {
1771   UseScratchRegisterScope temps(this);
1772   Register temp = temps.AcquireX();
1773   Mov(temp, function);
1774   CallCFunction(temp, num_of_reg_args, num_of_double_args);
1775 }
1776 
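// AAPCS64 passes the first eight integer arguments in registers (x0-x7); any
// further arguments are passed on the stack.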
1777 static const int kRegisterPassedArguments = 8;
1778 
CallCFunction(Register function,int num_of_reg_args,int num_of_double_args)1779 void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
1780                                    int num_of_double_args) {
1781   DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
1782   DCHECK(has_frame());
1783 
1784   // If we're passing doubles, we're limited to the following prototypes
1785   // (defined by ExternalReference::Type):
1786   //  BUILTIN_COMPARE_CALL:  int f(double, double)
1787   //  BUILTIN_FP_FP_CALL:    double f(double, double)
1788   //  BUILTIN_FP_CALL:       double f(double)
1789   //  BUILTIN_FP_INT_CALL:   double f(double, int)
1790   if (num_of_double_args > 0) {
1791     DCHECK_LE(num_of_reg_args, 1);
1792     DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
1793   }
1794 
1795   // Call directly. The function called cannot cause a GC, or allow preemption,
1796   // so the return address in the link register stays correct.
1797   Call(function);
1798 
1799   if (num_of_reg_args > kRegisterPassedArguments) {
1800     // Drop the slots claimed for arguments that did not fit in registers.
1801     int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
1802     Drop(claim_slots);
1803   }
1804 }
1805 
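// Load element |constant_index| of the builtins constants table (a FixedArray
// reachable from the root list) into |destination|; the access only depends
// on the root register.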
LoadFromConstantsTable(Register destination,int constant_index)1806 void TurboAssembler::LoadFromConstantsTable(Register destination,
1807                                             int constant_index) {
1808   DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
1809       Heap::kBuiltinsConstantsTableRootIndex));
1810   LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
1811   Ldr(destination,
1812       FieldMemOperand(destination,
1813                       FixedArray::kHeaderSize + constant_index * kPointerSize));
1814 }
1815 
LoadRootRelative(Register destination,int32_t offset)1816 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
1817   Ldr(destination, MemOperand(kRootRegister, offset));
1818 }
1819 
LoadRootRegisterOffset(Register destination,intptr_t offset)1820 void TurboAssembler::LoadRootRegisterOffset(Register destination,
1821                                             intptr_t offset) {
1822   if (offset == 0) {
1823     Mov(destination, kRootRegister);
1824   } else {
1825     Add(destination, kRootRegister, offset);
1826   }
1827 }
1828 
Jump(Register target,Condition cond)1829 void TurboAssembler::Jump(Register target, Condition cond) {
1830   if (cond == nv) return;
1831   Label done;
1832   if (cond != al) B(NegateCondition(cond), &done);
1833   Br(target);
1834   Bind(&done);
1835 }
1836 
JumpHelper(int64_t offset,RelocInfo::Mode rmode,Condition cond)1837 void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
1838                                 Condition cond) {
1839   if (cond == nv) return;
1840   Label done;
1841   if (cond != al) B(NegateCondition(cond), &done);
1842   if (CanUseNearCallOrJump(rmode)) {
1843     DCHECK(IsNearCallOffset(offset));
1844     near_jump(static_cast<int>(offset), rmode);
1845   } else {
1846     UseScratchRegisterScope temps(this);
1847     Register temp = temps.AcquireX();
1848     uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
1849     Mov(temp, Immediate(imm, rmode));
1850     Br(temp);
1851   }
1852   Bind(&done);
1853 }
1854 
1855 namespace {
1856 
1857 // The calculated offset is either:
1858 // * the 'target' input unmodified if this is a WASM call, or
1859 // * the offset of the target from the current PC, in instructions, for any
1860 //   other type of call.
CalculateTargetOffset(Address target,RelocInfo::Mode rmode,byte * pc)1861 static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
1862                                      byte* pc) {
1863   int64_t offset = static_cast<int64_t>(target);
1864   // The target of WebAssembly calls is still an index instead of an actual
1865   // address at this point, and needs to be encoded as-is.
1866   if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
1867     offset -= reinterpret_cast<int64_t>(pc);
1868     DCHECK_EQ(offset % kInstrSize, 0);
1869     offset = offset / static_cast<int>(kInstrSize);
1870   }
1871   return offset;
1872 }
1873 }  // namespace
1874 
Jump(Address target,RelocInfo::Mode rmode,Condition cond)1875 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
1876                           Condition cond) {
1877   JumpHelper(CalculateTargetOffset(target, rmode, pc_), rmode, cond);
1878 }
1879 
Jump(Handle<Code> code,RelocInfo::Mode rmode,Condition cond)1880 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1881                           Condition cond) {
1882   DCHECK(RelocInfo::IsCodeTarget(rmode));
1883   if (FLAG_embedded_builtins) {
1884     if (root_array_available_ && options().isolate_independent_code &&
1885         !Builtins::IsIsolateIndependentBuiltin(*code)) {
1886       // Calls to embedded targets are initially generated as standard
1887       // pc-relative calls below. When creating the embedded blob, call offsets
1888       // are patched up to point directly to the off-heap instruction start.
1889       // Note: It is safe to dereference {code} above since code generation
1890       // for builtins and code stubs happens on the main thread.
1891       UseScratchRegisterScope temps(this);
1892       Register scratch = temps.AcquireX();
1893       IndirectLoadConstant(scratch, code);
1894       Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
1895       Jump(scratch, cond);
1896       return;
1897     } else if (options().inline_offheap_trampolines) {
1898       int builtin_index = Builtins::kNoBuiltinId;
1899       if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
1900           Builtins::IsIsolateIndependent(builtin_index)) {
1901         // Inline the trampoline.
1902         RecordCommentForOffHeapTrampoline(builtin_index);
1903         CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1904         UseScratchRegisterScope temps(this);
1905         Register scratch = temps.AcquireX();
1906         EmbeddedData d = EmbeddedData::FromBlob();
1907         Address entry = d.InstructionStartOfBuiltin(builtin_index);
1908         Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1909         Jump(scratch, cond);
1910         return;
1911       }
1912     }
1913   }
1914   if (CanUseNearCallOrJump(rmode)) {
1915     JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
1916   } else {
1917     Jump(code.address(), rmode, cond);
1918   }
1919 }
1920 
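// The Call variants below are wrapped in a BlockPoolsScope so that constant
// pool or veneer pool emission cannot split the call sequence and change its
// size.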
Call(Register target)1921 void TurboAssembler::Call(Register target) {
1922   BlockPoolsScope scope(this);
1923   Blr(target);
1924 }
1925 
Call(Address target,RelocInfo::Mode rmode)1926 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
1927   BlockPoolsScope scope(this);
1928 
1929   if (CanUseNearCallOrJump(rmode)) {
1930     int64_t offset = CalculateTargetOffset(target, rmode, pc_);
1931     DCHECK(IsNearCallOffset(offset));
1932     near_call(static_cast<int>(offset), rmode);
1933   } else {
1934     IndirectCall(target, rmode);
1935   }
1936 }
1937 
Call(Handle<Code> code,RelocInfo::Mode rmode)1938 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
1939   BlockPoolsScope scope(this);
1940 
1941   if (FLAG_embedded_builtins) {
1942     if (root_array_available_ && options().isolate_independent_code &&
1943         !Builtins::IsIsolateIndependentBuiltin(*code)) {
1944       // Calls to embedded targets are initially generated as standard
1945       // pc-relative calls below. When creating the embedded blob, call offsets
1946       // are patched up to point directly to the off-heap instruction start.
1947       // Note: It is safe to dereference {code} above since code generation
1948       // for builtins and code stubs happens on the main thread.
1949       UseScratchRegisterScope temps(this);
1950       Register scratch = temps.AcquireX();
1951       IndirectLoadConstant(scratch, code);
1952       Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
1953       Call(scratch);
1954       return;
1955     } else if (options().inline_offheap_trampolines) {
1956       int builtin_index = Builtins::kNoBuiltinId;
1957       if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
1958           Builtins::IsIsolateIndependent(builtin_index)) {
1959         // Inline the trampoline.
1960         RecordCommentForOffHeapTrampoline(builtin_index);
1961         CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1962         UseScratchRegisterScope temps(this);
1963         Register scratch = temps.AcquireX();
1964         EmbeddedData d = EmbeddedData::FromBlob();
1965         Address entry = d.InstructionStartOfBuiltin(builtin_index);
1966         Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1967         Call(scratch);
1968         return;
1969       }
1970     }
1971   }
1972   if (CanUseNearCallOrJump(rmode)) {
1973     near_call(AddCodeTarget(code), rmode);
1974   } else {
1975     IndirectCall(code.address(), rmode);
1976   }
1977 }
1978 
Call(ExternalReference target)1979 void TurboAssembler::Call(ExternalReference target) {
1980   UseScratchRegisterScope temps(this);
1981   Register temp = temps.AcquireX();
1982   Mov(temp, target);
1983   Call(temp);
1984 }
1985 
IndirectCall(Address target,RelocInfo::Mode rmode)1986 void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
1987   UseScratchRegisterScope temps(this);
1988   Register temp = temps.AcquireX();
1989   Mov(temp, Immediate(target, rmode));
1990   Blr(temp);
1991 }
1992 
IsNearCallOffset(int64_t offset)1993 bool TurboAssembler::IsNearCallOffset(int64_t offset) {
1994   return is_int26(offset);
1995 }
1996 
CallForDeoptimization(Address target,int deopt_id,RelocInfo::Mode rmode)1997 void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
1998                                            RelocInfo::Mode rmode) {
1999   DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
2000 
2001   BlockPoolsScope scope(this);
2002 #ifdef DEBUG
2003   Label start;
2004   Bind(&start);
2005 #endif
2006   // The deoptimizer requires the deoptimization id to be in x16.
2007   UseScratchRegisterScope temps(this);
2008   Register temp = temps.AcquireX();
2009   DCHECK(temp.Is(x16));
2010   // Make sure that the deopt id can be encoded in 16 bits, so that it can be
2011   // encoded in a single movz instruction with a zero shift.
2012   DCHECK(is_uint16(deopt_id));
2013   movz(temp, deopt_id);
2014   int64_t offset = static_cast<int64_t>(target) -
2015                    static_cast<int64_t>(options().code_range_start);
2016   DCHECK_EQ(offset % kInstrSize, 0);
2017   offset = offset / static_cast<int>(kInstrSize);
2018   DCHECK(IsNearCallOffset(offset));
2019   near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
2020 
2021   DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
2022 }
2023 
TryRepresentDoubleAsInt(Register as_int,VRegister value,VRegister scratch_d,Label * on_successful_conversion,Label * on_failed_conversion)2024 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
2025                                              VRegister scratch_d,
2026                                              Label* on_successful_conversion,
2027                                              Label* on_failed_conversion) {
2028   // Convert to an int and back again, then compare with the original value.
2029   Fcvtzs(as_int, value);
2030   Scvtf(scratch_d, as_int);
2031   Fcmp(value, scratch_d);
2032 
2033   if (on_successful_conversion) {
2034     B(on_successful_conversion, eq);
2035   }
2036   if (on_failed_conversion) {
2037     B(on_failed_conversion, ne);
2038   }
2039 }
2040 
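// PrepareForTailCall removes the current frame so that a tail call can reuse
// the caller's frame: it computes the end of the caller's argument area,
// restores the caller's fp and lr, copies the callee's arguments (plus the
// receiver) into that area iterating backwards because the regions may
// overlap, and finally moves sp to the new top of the stack. Both areas are
// rounded up to 16 bytes so stack alignment and slot padding are preserved.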
PrepareForTailCall(const ParameterCount & callee_args_count,Register caller_args_count_reg,Register scratch0,Register scratch1)2041 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2042                                         Register caller_args_count_reg,
2043                                         Register scratch0, Register scratch1) {
2044 #if DEBUG
2045   if (callee_args_count.is_reg()) {
2046     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2047                        scratch1));
2048   } else {
2049     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2050   }
2051 #endif
2052 
2053   // Calculate the end of the destination area where we will put the arguments
2054   // after we drop the current frame. We add kPointerSize to account for the
2055   // receiver argument, which is not included in the formal parameter count.
2056   Register dst_reg = scratch0;
2057   Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2058   Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
2059   // Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
2060   // potential padding.
2061   Add(dst_reg, dst_reg, 15);
2062   Bic(dst_reg, dst_reg, 15);
2063 
2064   Register src_reg = caller_args_count_reg;
2065   // Calculate the end of source area. +kPointerSize is for the receiver.
2066   if (callee_args_count.is_reg()) {
2067     Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2068     Add(src_reg, src_reg, kPointerSize);
2069   } else {
2070     Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
2071   }
2072 
2073   // Round src_reg up to a multiple of 16 bytes, so we include any potential
2074   // padding in the copy.
2075   Add(src_reg, src_reg, 15);
2076   Bic(src_reg, src_reg, 15);
2077 
2078   if (FLAG_debug_code) {
2079     Cmp(src_reg, dst_reg);
2080     Check(lo, AbortReason::kStackAccessBelowStackPointer);
2081   }
2082 
2083   // Restore caller's frame pointer and return address now as they will be
2084   // overwritten by the copying loop.
2085   Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2086   Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2087 
2088   // Now copy callee arguments to the caller frame going backwards to avoid
2089   // callee arguments corruption (source and destination areas could overlap).
2090 
2091   // Both src_reg and dst_reg are pointing to the word after the one to copy,
2092   // so they must be pre-decremented in the loop.
2093   Register tmp_reg = scratch1;
2094   Label loop, entry;
2095   B(&entry);
2096   bind(&loop);
2097   Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2098   Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2099   bind(&entry);
2100   Cmp(sp, src_reg);
2101   B(ne, &loop);
2102 
2103   // Leave current frame.
2104   Mov(sp, dst_reg);
2105 }
2106 
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,InvokeFlag flag,bool * definitely_mismatches)2107 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2108                                     const ParameterCount& actual, Label* done,
2109                                     InvokeFlag flag,
2110                                     bool* definitely_mismatches) {
2111   bool definitely_matches = false;
2112   *definitely_mismatches = false;
2113   Label regular_invoke;
2114 
2115   // Check whether the expected and actual arguments count match. If not,
2116   // setup registers according to contract with ArgumentsAdaptorTrampoline:
2117   //  x0: actual arguments count.
2118   //  x1: function (passed through to callee).
2119   //  x2: expected arguments count.
2120 
2121   // The code below is made a lot easier because the calling code already sets
2122   // up actual and expected registers according to the contract if values are
2123   // passed in registers.
2124   DCHECK(actual.is_immediate() || actual.reg().is(x0));
2125   DCHECK(expected.is_immediate() || expected.reg().is(x2));
2126 
2127   if (expected.is_immediate()) {
2128     DCHECK(actual.is_immediate());
2129     Mov(x0, actual.immediate());
2130     if (expected.immediate() == actual.immediate()) {
2131       definitely_matches = true;
2132 
2133     } else {
2134       if (expected.immediate() ==
2135           SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2136         // Don't worry about adapting arguments for builtins that
2137         // don't want that done. Skip the adaptation code by making it
2138         // look like we have a match between the expected and actual
2139         // number of arguments.
2140         definitely_matches = true;
2141       } else {
2142         *definitely_mismatches = true;
2143         // Set up x2 for the argument adaptor.
2144         Mov(x2, expected.immediate());
2145       }
2146     }
2147 
2148   } else {  // expected is a register.
2149     Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2150                                               : Operand(actual.reg());
2151     Mov(x0, actual_op);
2152     // If actual == expected perform a regular invocation.
2153     Cmp(expected.reg(), actual_op);
2154     B(eq, &regular_invoke);
2155   }
2156 
2157   // If the argument counts may mismatch, generate a call to the argument
2158   // adaptor.
2159   if (!definitely_matches) {
2160     Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
2161     if (flag == CALL_FUNCTION) {
2162       Call(adaptor);
2163       if (!*definitely_mismatches) {
2164         // If the arg counts don't match, no extra code is emitted by
2165         // MAsm::InvokeFunctionCode and we can just fall through.
2166         B(done);
2167       }
2168     } else {
2169       Jump(adaptor, RelocInfo::CODE_TARGET);
2170     }
2171   }
2172   Bind(&regular_invoke);
2173 }
2174 
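// CheckDebugHook calls Runtime::kDebugOnFunctionCall when the debugger has
// requested to be notified of function calls. The expected/actual argument
// counts, new.target and the function are spilled around the runtime call, and
// the function and receiver are passed as its arguments; padreg stands in for
// any value that is not held in a register.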
CheckDebugHook(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)2175 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
2176                                     const ParameterCount& expected,
2177                                     const ParameterCount& actual) {
2178   Label skip_hook;
2179 
2180   Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
2181   Ldrsb(x4, MemOperand(x4));
2182   Cbz(x4, &skip_hook);
2183 
2184   {
2185     // Load receiver to pass it later to DebugOnFunctionCall hook.
2186     Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2187                                               : Operand(actual.reg());
2188     Mov(x4, actual_op);
2189     Ldr(x4, MemOperand(sp, x4, LSL, kPointerSizeLog2));
2190     FrameScope frame(this,
2191                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2192 
2193     Register expected_reg = padreg;
2194     Register actual_reg = padreg;
2195     if (expected.is_reg()) expected_reg = expected.reg();
2196     if (actual.is_reg()) actual_reg = actual.reg();
2197     if (!new_target.is_valid()) new_target = padreg;
2198 
2199     // Save values on stack.
2200     SmiTag(expected_reg);
2201     SmiTag(actual_reg);
2202     Push(expected_reg, actual_reg, new_target, fun);
2203     Push(fun, x4);
2204     CallRuntime(Runtime::kDebugOnFunctionCall);
2205 
2206     // Restore values from stack.
2207     Pop(fun, new_target, actual_reg, expected_reg);
2208     SmiUntag(actual_reg);
2209     SmiUntag(expected_reg);
2210   }
2211   Bind(&skip_hook);
2212 }
2213 
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag)2214 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2215                                         const ParameterCount& expected,
2216                                         const ParameterCount& actual,
2217                                         InvokeFlag flag) {
2218   // You can't call a function without a valid frame.
2219   DCHECK(flag == JUMP_FUNCTION || has_frame());
2220   DCHECK(function.is(x1));
2221   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2222 
2223   // On function call, call into the debugger if necessary.
2224   CheckDebugHook(function, new_target, expected, actual);
2225 
2226   // Clear the new.target register if not given.
2227   if (!new_target.is_valid()) {
2228     LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2229   }
2230 
2231   Label done;
2232   bool definitely_mismatches = false;
2233   InvokePrologue(expected, actual, &done, flag, &definitely_mismatches);
2234 
2235   // If we are certain that actual != expected, then we know InvokePrologue will
2236   // have handled the call through the argument adaptor mechanism.
2237   // The called function expects the call kind in x5.
2238   if (!definitely_mismatches) {
2239     // We call indirectly through the code field in the function to
2240     // allow recompilation to take effect without changing any of the
2241     // call sites.
2242     Register code = kJavaScriptCallCodeStartRegister;
2243     Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
2244     Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
2245     if (flag == CALL_FUNCTION) {
2246       Call(code);
2247     } else {
2248       DCHECK(flag == JUMP_FUNCTION);
2249       Jump(code);
2250     }
2251   }
2252 
2253   // Continue here if InvokePrologue does handle the invocation due to
2254   // mismatched parameter counts.
2255   Bind(&done);
2256 }
2257 
InvokeFunction(Register function,Register new_target,const ParameterCount & actual,InvokeFlag flag)2258 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2259                                     const ParameterCount& actual,
2260                                     InvokeFlag flag) {
2261   // You can't call a function without a valid frame.
2262   DCHECK(flag == JUMP_FUNCTION || has_frame());
2263 
2264   // Contract with called JS functions requires that function is passed in x1.
2265   // (See FullCodeGenerator::Generate().)
2266   DCHECK(function.is(x1));
2267 
2268   Register expected_reg = x2;
2269 
2270   Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2271   // The formal parameter count is stored as an unsigned 16-bit field in the
2272   // SharedFunctionInfo, so it is loaded with a zero-extending halfword load
2273   // (Ldrh) below.
2274   Ldr(expected_reg, FieldMemOperand(function,
2275                                     JSFunction::kSharedFunctionInfoOffset));
2276   Ldrh(expected_reg,
2277        FieldMemOperand(expected_reg,
2278                        SharedFunctionInfo::kFormalParameterCountOffset));
2279 
2280   ParameterCount expected(expected_reg);
2281   InvokeFunctionCode(function, new_target, expected, actual, flag);
2282 }
2283 
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag)2284 void MacroAssembler::InvokeFunction(Register function,
2285                                     const ParameterCount& expected,
2286                                     const ParameterCount& actual,
2287                                     InvokeFlag flag) {
2288   // You can't call a function without a valid frame.
2289   DCHECK(flag == JUMP_FUNCTION || has_frame());
2290 
2291   // Contract with called JS functions requires that function is passed in x1.
2292   // (See FullCodeGenerator::Generate().)
2293   DCHECK(function.Is(x1));
2294 
2295   // Set up the context.
2296   Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2297 
2298   InvokeFunctionCode(function, no_reg, expected, actual, flag);
2299 }
2300 
TryConvertDoubleToInt64(Register result,DoubleRegister double_input,Label * done)2301 void TurboAssembler::TryConvertDoubleToInt64(Register result,
2302                                              DoubleRegister double_input,
2303                                              Label* done) {
2304   // Try to convert with an FPU convert instruction. It's trivial to compute
2305   // the modulo operation on an integer register so we convert to a 64-bit
2306   // integer.
2307   //
2308   // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
2309   // when the double is out of range (including infinities). NaNs are converted
2310   // to 0, as ECMA-262 requires; saturated results are detected below.
2311   Fcvtzs(result.X(), double_input);
2312 
2313   // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
2314   // representable using a double, so if the result is one of those then we know
2315   // that saturation occurred, and we need to manually handle the conversion.
2316   //
2317   // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2318   // 1 will cause signed overflow.
2319   Cmp(result.X(), 1);
2320   Ccmp(result.X(), -1, VFlag, vc);
2321 
2322   B(vc, done);
2323 }
2324 
TruncateDoubleToI(Isolate * isolate,Zone * zone,Register result,DoubleRegister double_input,StubCallMode stub_mode)2325 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2326                                        Register result,
2327                                        DoubleRegister double_input,
2328                                        StubCallMode stub_mode) {
2329   Label done;
2330 
2331   // Try to convert the double to an int64. If successful, the bottom 32 bits
2332   // contain our truncated int32 result.
2333   TryConvertDoubleToInt64(result, double_input, &done);
2334 
2335   // If we fell through, the inline version didn't succeed, so call the stub.
2336   Push(lr, double_input);
2337 
2338   // DoubleToI preserves any registers it needs to clobber.
2339   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2340     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
2341   } else {
2342     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
2343   }
2344   Ldr(result, MemOperand(sp, 0));
2345 
2346   DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2347   Pop(xzr, lr);  // xzr to drop the double input on the stack.
2348 
2349   Bind(&done);
2350   // Keep our invariant that the upper 32 bits are zero.
2351   Uxtw(result.W(), result.W());
2352 }
2353 
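// Standard JSFunction frame prologue: push lr, fp, the context (cp) and the
// incoming function (x1), then point fp at the saved fp slot.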
Prologue()2354 void TurboAssembler::Prologue() {
2355   Push(lr, fp, cp, x1);
2356   Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
2357 }
2358 
EnterFrame(StackFrame::Type type)2359 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2360   UseScratchRegisterScope temps(this);
2361 
2362   if (type == StackFrame::INTERNAL) {
2363     Register type_reg = temps.AcquireX();
2364     Mov(type_reg, StackFrame::TypeToMarker(type));
2365     // type_reg pushed twice for alignment.
2366     Push(lr, fp, type_reg, type_reg);
2367     const int kFrameSize =
2368         TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize;
2369     Add(fp, sp, kFrameSize);
2370     // sp[3] : lr
2371     // sp[2] : fp
2372     // sp[1] : type
2373     // sp[0] : for alignment
2374   } else if (type == StackFrame::WASM_COMPILED ||
2375              type == StackFrame::WASM_COMPILE_LAZY) {
2376     Register type_reg = temps.AcquireX();
2377     Mov(type_reg, StackFrame::TypeToMarker(type));
2378     Push(lr, fp);
2379     Mov(fp, sp);
2380     Push(type_reg, padreg);
2381     // sp[3] : lr
2382     // sp[2] : fp
2383     // sp[1] : type
2384     // sp[0] : for alignment
2385   } else {
2386     DCHECK_EQ(type, StackFrame::CONSTRUCT);
2387     Register type_reg = temps.AcquireX();
2388     Mov(type_reg, StackFrame::TypeToMarker(type));
2389 
2390     // Users of this frame type push a context pointer after the type field,
2391     // so do it here to keep the stack pointer aligned.
2392     Push(lr, fp, type_reg, cp);
2393 
2394     // The context pointer isn't part of the fixed frame, so add an extra slot
2395     // to account for it.
2396     Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2397     // sp[3] : lr
2398     // sp[2] : fp
2399     // sp[1] : type
2400     // sp[0] : cp
2401   }
2402 }
2403 
LeaveFrame(StackFrame::Type type)2404 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2405   // Drop the execution stack down to the frame pointer and restore
2406   // the caller frame pointer and return address.
2407   Mov(sp, fp);
2408   Pop(fp, lr);
2409 }
2410 
2411 
ExitFramePreserveFPRegs()2412 void MacroAssembler::ExitFramePreserveFPRegs() {
2413   DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
2414   PushCPURegList(kCallerSavedV);
2415 }
2416 
2417 
ExitFrameRestoreFPRegs()2418 void MacroAssembler::ExitFrameRestoreFPRegs() {
2419   // Read the registers from the stack without popping them. The stack pointer
2420   // will be reset as part of the unwinding process.
2421   CPURegList saved_fp_regs = kCallerSavedV;
2422   DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
2423 
2424   int offset = ExitFrameConstants::kLastExitFrameField;
2425   while (!saved_fp_regs.IsEmpty()) {
2426     const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2427     const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2428     offset -= 2 * kDRegSize;
2429     Ldp(dst1, dst0, MemOperand(fp, offset));
2430   }
2431 }
2432 
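// Build an exit frame for calling into C++ or a builtin: push lr and fp, then
// the frame-type marker, a slot reserved for SPOffset, the code object and
// padding; publish fp and cp in the isolate's top-frame slots; optionally
// spill the caller-saved FP registers; and claim space for the return address
// plus |extra_space| caller-requested slots. The layout is described inline
// below.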
EnterExitFrame(bool save_doubles,const Register & scratch,int extra_space,StackFrame::Type frame_type)2433 void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
2434                                     int extra_space,
2435                                     StackFrame::Type frame_type) {
2436   DCHECK(frame_type == StackFrame::EXIT ||
2437          frame_type == StackFrame::BUILTIN_EXIT);
2438 
2439   // Set up the new stack frame.
2440   Push(lr, fp);
2441   Mov(fp, sp);
2442   Mov(scratch, StackFrame::TypeToMarker(frame_type));
2443   Push(scratch, xzr);
2444   Mov(scratch, CodeObject());
2445   Push(scratch, padreg);
2446   //          fp[8]: CallerPC (lr)
2447   //    fp -> fp[0]: CallerFP (old fp)
2448   //          fp[-8]: STUB marker
2449   //          fp[-16]: Space reserved for SPOffset.
2450   //          fp[-24]: CodeObject()
2451   //    sp -> fp[-32]: padding
2452   STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
2453   STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2454   STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2455   STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2456   STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2457   STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kPaddingOffset);
2458 
2459   // Save the frame pointer and context pointer in the top frame.
2460   Mov(scratch,
2461       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
2462   Str(fp, MemOperand(scratch));
2463   Mov(scratch,
2464       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
2465   Str(cp, MemOperand(scratch));
2466 
2467   STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
2468   if (save_doubles) {
2469     ExitFramePreserveFPRegs();
2470   }
2471 
2472   // Round the number of slots we need to claim to a multiple of two.
2473   int slots_to_claim = RoundUp(extra_space + 1, 2);
2474 
2475   // Reserve space for the return address and for user requested memory.
2476   // We do this before aligning to make sure that we end up correctly
2477   // aligned with the minimum of wasted space.
2478   Claim(slots_to_claim, kXRegSize);
2479   //         fp[8]: CallerPC (lr)
2480   //   fp -> fp[0]: CallerFP (old fp)
2481   //         fp[-8]: STUB marker
2482   //         fp[-16]: Space reserved for SPOffset.
2483   //         fp[-24]: CodeObject()
2484   //         fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
2485   //         sp[8]: Extra space reserved for caller (if extra_space != 0).
2486   //   sp -> sp[0]: Space reserved for the return address.
2487 
2488   // ExitFrame::GetStateForFramePointer expects to find the return address at
2489   // the memory address immediately below the pointer stored in SPOffset.
2490   // It is not safe to derive much else from SPOffset, because the size of the
2491   // padding can vary.
2492   Add(scratch, sp, kXRegSize);
2493   Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2494 }
2495 
2496 
2497 // Leave the current exit frame.
LeaveExitFrame(bool restore_doubles,const Register & scratch,const Register & scratch2)2498 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2499                                     const Register& scratch,
2500                                     const Register& scratch2) {
2501   if (restore_doubles) {
2502     ExitFrameRestoreFPRegs();
2503   }
2504 
2505   // Restore the context pointer from the top frame.
2506   Mov(scratch,
2507       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
2508   Ldr(cp, MemOperand(scratch));
2509 
2510   if (emit_debug_code()) {
2511     // Also emit debug code to clear the cp in the top frame.
2512     Mov(scratch2, Operand(Context::kInvalidContext));
2513     Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
2514                                            isolate()));
2515     Str(scratch2, MemOperand(scratch));
2516   }
2517   // Clear the frame pointer from the top frame.
2518   Mov(scratch,
2519       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
2520   Str(xzr, MemOperand(scratch));
2521 
2522   // Pop the exit frame.
2523   //         fp[8]: CallerPC (lr)
2524   //   fp -> fp[0]: CallerFP (old fp)
2525   //         fp[...]: The rest of the frame.
2526   Mov(sp, fp);
2527   Pop(fp, lr);
2528 }
2529 
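// If |in| holds the cleared weak reference sentinel, branch to
// |target_if_cleared|; otherwise clear the weak tag bit so that |out| holds a
// strong heap object pointer.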
LoadWeakValue(Register out,Register in,Label * target_if_cleared)2530 void MacroAssembler::LoadWeakValue(Register out, Register in,
2531                                    Label* target_if_cleared) {
2532   CompareAndBranch(in, Operand(kClearedWeakHeapObject), eq, target_if_cleared);
2533 
2534   and_(out, in, Operand(~kWeakHeapObjectMask));
2535 }
2536 
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2537 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2538                                       Register scratch1, Register scratch2) {
2539   DCHECK_NE(value, 0);
2540   if (FLAG_native_code_counters && counter->Enabled()) {
2541     Mov(scratch2, ExternalReference::Create(counter));
2542     Ldr(scratch1.W(), MemOperand(scratch2));
2543     Add(scratch1.W(), scratch1.W(), value);
2544     Str(scratch1.W(), MemOperand(scratch2));
2545   }
2546 }
2547 
2548 
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2549 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2550                                       Register scratch1, Register scratch2) {
2551   IncrementCounter(counter, -value, scratch1, scratch2);
2552 }
2553 
MaybeDropFrames()2554 void MacroAssembler::MaybeDropFrames() {
2555   // Check whether we need to drop frames to restart a function on the stack.
2556   Mov(x1, ExternalReference::debug_restart_fp_address(isolate()));
2557   Ldr(x1, MemOperand(x1));
2558   Tst(x1, x1);
2559   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
2560        ne);
2561 }
2562 
JumpIfObjectType(Register object,Register map,Register type_reg,InstanceType type,Label * if_cond_pass,Condition cond)2563 void MacroAssembler::JumpIfObjectType(Register object,
2564                                       Register map,
2565                                       Register type_reg,
2566                                       InstanceType type,
2567                                       Label* if_cond_pass,
2568                                       Condition cond) {
2569   CompareObjectType(object, map, type_reg, type);
2570   B(cond, if_cond_pass);
2571 }
2572 
2573 
2574 // Sets condition flags based on comparison, and returns type in type_reg.
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)2575 void MacroAssembler::CompareObjectType(Register object,
2576                                        Register map,
2577                                        Register type_reg,
2578                                        InstanceType type) {
2579   Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2580   CompareInstanceType(map, type_reg, type);
2581 }
2582 
2583 
2584 // Sets condition flags based on comparison, and returns type in type_reg.
CompareInstanceType(Register map,Register type_reg,InstanceType type)2585 void MacroAssembler::CompareInstanceType(Register map,
2586                                          Register type_reg,
2587                                          InstanceType type) {
2588   Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2589   Cmp(type_reg, type);
2590 }
2591 
2592 
LoadElementsKindFromMap(Register result,Register map)2593 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
2594   // Load the map's "bit field 2".
2595   Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
2596   // Retrieve elements_kind from bit field 2.
2597   DecodeField<Map::ElementsKindBits>(result);
2598 }
2599 
CompareRoot(const Register & obj,Heap::RootListIndex index)2600 void MacroAssembler::CompareRoot(const Register& obj,
2601                                  Heap::RootListIndex index) {
2602   UseScratchRegisterScope temps(this);
2603   Register temp = temps.AcquireX();
2604   DCHECK(!AreAliased(obj, temp));
2605   LoadRoot(temp, index);
2606   Cmp(obj, temp);
2607 }
2608 
2609 
JumpIfRoot(const Register & obj,Heap::RootListIndex index,Label * if_equal)2610 void MacroAssembler::JumpIfRoot(const Register& obj,
2611                                 Heap::RootListIndex index,
2612                                 Label* if_equal) {
2613   CompareRoot(obj, index);
2614   B(eq, if_equal);
2615 }
2616 
2617 
JumpIfNotRoot(const Register & obj,Heap::RootListIndex index,Label * if_not_equal)2618 void MacroAssembler::JumpIfNotRoot(const Register& obj,
2619                                    Heap::RootListIndex index,
2620                                    Label* if_not_equal) {
2621   CompareRoot(obj, index);
2622   B(ne, if_not_equal);
2623 }
2624 
2625 
CompareAndSplit(const Register & lhs,const Operand & rhs,Condition cond,Label * if_true,Label * if_false,Label * fall_through)2626 void MacroAssembler::CompareAndSplit(const Register& lhs,
2627                                      const Operand& rhs,
2628                                      Condition cond,
2629                                      Label* if_true,
2630                                      Label* if_false,
2631                                      Label* fall_through) {
2632   if ((if_true == if_false) && (if_false == fall_through)) {
2633     // Fall through.
2634   } else if (if_true == if_false) {
2635     B(if_true);
2636   } else if (if_false == fall_through) {
2637     CompareAndBranch(lhs, rhs, cond, if_true);
2638   } else if (if_true == fall_through) {
2639     CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
2640   } else {
2641     CompareAndBranch(lhs, rhs, cond, if_true);
2642     B(if_false);
2643   }
2644 }
2645 
2646 
TestAndSplit(const Register & reg,uint64_t bit_pattern,Label * if_all_clear,Label * if_any_set,Label * fall_through)2647 void MacroAssembler::TestAndSplit(const Register& reg,
2648                                   uint64_t bit_pattern,
2649                                   Label* if_all_clear,
2650                                   Label* if_any_set,
2651                                   Label* fall_through) {
2652   if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
2653     // Fall through.
2654   } else if (if_all_clear == if_any_set) {
2655     B(if_all_clear);
2656   } else if (if_all_clear == fall_through) {
2657     TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
2658   } else if (if_any_set == fall_through) {
2659     TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
2660   } else {
2661     TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
2662     B(if_all_clear);
2663   }
2664 }
2665 
AllowThisStubCall(CodeStub * stub)2666 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
2667   return has_frame() || !stub->SometimesSetsUpAFrame();
2668 }
2669 
PopSafepointRegisters()2670 void MacroAssembler::PopSafepointRegisters() {
2671   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
2672   DCHECK_GE(num_unsaved, 0);
2673   DCHECK_EQ(num_unsaved % 2, 0);
2674   DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
2675   PopXRegList(kSafepointSavedRegisters);
2676   Drop(num_unsaved);
2677 }
2678 
2679 
PushSafepointRegisters()2680 void MacroAssembler::PushSafepointRegisters() {
2681   // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
2682   // adjust the stack for unsaved registers.
2683   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
2684   DCHECK_GE(num_unsaved, 0);
2685   DCHECK_EQ(num_unsaved % 2, 0);
2686   DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
2687   Claim(num_unsaved);
2688   PushXRegList(kSafepointSavedRegisters);
2689 }
2690 
2691 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2692   // Make sure the safepoint registers list is what we expect.
2693   DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);
2694 
2695   // Safepoint registers are stored contiguously on the stack, but not all the
2696   // registers are saved. The following registers are excluded:
2697   //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
2698   //    the macro assembler.
2699   //  - x31 (sp) because the system stack pointer doesn't need to be included
2700   //    in safepoint registers.
2701   //
2702   // This function implements the mapping of register code to index into the
2703   // safepoint register slots.
2704   if ((reg_code >= 0) && (reg_code <= 15)) {
2705     return reg_code;
2706   } else if ((reg_code >= 18) && (reg_code <= 30)) {
2707     // Skip ip0 and ip1.
2708     return reg_code - 2;
2709   } else {
2710     // This register has no safepoint register slot.
2711     UNREACHABLE();
2712   }
2713 }
2714 
2715 void MacroAssembler::CheckPageFlag(const Register& object,
2716                                    const Register& scratch, int mask,
2717                                    Condition cc, Label* condition_met) {
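  // Clearing the page-offset bits of the object address yields the start of its
  // MemoryChunk, from which the flags word is loaded.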
2718   And(scratch, object, ~kPageAlignmentMask);
2719   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2720   if (cc == eq) {
2721     TestAndBranchIfAnySet(scratch, mask, condition_met);
2722   } else {
2723     TestAndBranchIfAllClear(scratch, mask, condition_met);
2724   }
2725 }
2726 
2727 void TurboAssembler::CheckPageFlagSet(const Register& object,
2728                                       const Register& scratch, int mask,
2729                                       Label* if_any_set) {
2730   And(scratch, object, ~kPageAlignmentMask);
2731   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2732   TestAndBranchIfAnySet(scratch, mask, if_any_set);
2733 }
2734 
2735 void TurboAssembler::CheckPageFlagClear(const Register& object,
2736                                         const Register& scratch, int mask,
2737                                         Label* if_all_clear) {
2738   And(scratch, object, ~kPageAlignmentMask);
2739   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2740   TestAndBranchIfAllClear(scratch, mask, if_all_clear);
2741 }
2742 
2743 void MacroAssembler::RecordWriteField(Register object, int offset,
2744                                       Register value, Register scratch,
2745                                       LinkRegisterStatus lr_status,
2746                                       SaveFPRegsMode save_fp,
2747                                       RememberedSetAction remembered_set_action,
2748                                       SmiCheck smi_check) {
2749   // First, check if a write barrier is even needed. The tests below
2750   // catch stores of Smis.
2751   Label done;
2752 
2753   // Skip the barrier if writing a smi.
2754   if (smi_check == INLINE_SMI_CHECK) {
2755     JumpIfSmi(value, &done);
2756   }
2757 
2758   // Although the object register is tagged, the offset is relative to the start
2759   // of the object, so offset must be a multiple of kPointerSize.
2760   DCHECK(IsAligned(offset, kPointerSize));
2761 
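  // Compute the untagged field address; subtracting kHeapObjectTag removes the
  // heap object tag from the tagged 'object' pointer.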
2762   Add(scratch, object, offset - kHeapObjectTag);
2763   if (emit_debug_code()) {
2764     Label ok;
2765     Tst(scratch, kPointerSize - 1);
2766     B(eq, &ok);
2767     Abort(AbortReason::kUnalignedCellInWriteBarrier);
2768     Bind(&ok);
2769   }
2770 
2771   RecordWrite(object, scratch, value, lr_status, save_fp, remembered_set_action,
2772               OMIT_SMI_CHECK);
2773 
2774   Bind(&done);
2775 
2776   // When the debug-code flag is on, zap the input registers that this function
2777   // clobbers, in order to provoke errors if they are relied upon afterwards.
2778   if (emit_debug_code()) {
2779     Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
2780     Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
2781   }
2782 }
2783 
2784 void TurboAssembler::SaveRegisters(RegList registers) {
2785   DCHECK_GT(NumRegs(registers), 0);
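  // lr is always included in the saved set, since the caller is about to make a
  // call that clobbers it.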
2786   CPURegList regs(lr);
2787   for (int i = 0; i < Register::kNumRegisters; ++i) {
2788     if ((registers >> i) & 1u) {
2789       regs.Combine(Register::XRegFromCode(i));
2790     }
2791   }
2792 
2793   PushCPURegList(regs);
2794 }
2795 
2796 void TurboAssembler::RestoreRegisters(RegList registers) {
2797   DCHECK_GT(NumRegs(registers), 0);
2798   CPURegList regs(lr);
2799   for (int i = 0; i < Register::kNumRegisters; ++i) {
2800     if ((registers >> i) & 1u) {
2801       regs.Combine(Register::XRegFromCode(i));
2802     }
2803   }
2804 
2805   PopCPURegList(regs);
2806 }
2807 
2808 void TurboAssembler::CallRecordWriteStub(
2809     Register object, Register address,
2810     RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
2811   // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
2812   // i.e. we always emit the remembered set code and save FP registers in
2813   // RecordWriteStub. If a large performance regression is observed, we should
2814   // use these values to avoid unnecessary work.
2815 
2816   Callable const callable =
2817       Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
2818   RegList registers = callable.descriptor().allocatable_registers();
2819 
2820   SaveRegisters(registers);
2821 
2822   Register object_parameter(callable.descriptor().GetRegisterParameter(
2823       RecordWriteDescriptor::kObject));
2824   Register slot_parameter(
2825       callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
2826   Register isolate_parameter(callable.descriptor().GetRegisterParameter(
2827       RecordWriteDescriptor::kIsolate));
2828   Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
2829       RecordWriteDescriptor::kRememberedSet));
2830   Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
2831       RecordWriteDescriptor::kFPMode));
2832 
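  // The descriptor's parameter registers may alias 'object' and 'address', so
  // move the values via the stack: 'address' lands in the slot register and
  // 'object' in the object register, regardless of any overlap.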
2833   Push(object, address);
2834 
2835   Pop(slot_parameter, object_parameter);
2836 
2837   Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
2838   Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
2839   Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
2840   Call(callable.code(), RelocInfo::CODE_TARGET);
2841 
2842   RestoreRegisters(registers);
2843 }
2844 
2845 // Will clobber: object, address, value.
2846 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
2847 //
2848 // The register 'object' contains a heap object pointer. The heap object tag is
2849 // shifted away.
2850 void MacroAssembler::RecordWrite(Register object, Register address,
2851                                  Register value, LinkRegisterStatus lr_status,
2852                                  SaveFPRegsMode fp_mode,
2853                                  RememberedSetAction remembered_set_action,
2854                                  SmiCheck smi_check) {
2855   ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
2856   DCHECK(!AreAliased(object, value));
2857 
2858   if (emit_debug_code()) {
2859     UseScratchRegisterScope temps(this);
2860     Register temp = temps.AcquireX();
2861 
2862     Ldr(temp, MemOperand(address));
2863     Cmp(temp, value);
2864     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
2865   }
2866 
2867   // First, check if a write barrier is even needed. The tests below
2868   // catch stores of smis and stores into the young generation.
2869   Label done;
2870 
2871   if (smi_check == INLINE_SMI_CHECK) {
2872     DCHECK_EQ(0, kSmiTag);
2873     JumpIfSmi(value, &done);
2874   }
2875 
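  // The barrier can be skipped if the value's page is not an interesting target
  // of pointers, or if the object's page is not an interesting source.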
2876   CheckPageFlagClear(value,
2877                      value,  // Used as scratch.
2878                      MemoryChunk::kPointersToHereAreInterestingMask, &done);
2879   CheckPageFlagClear(object,
2880                      value,  // Used as scratch.
2881                      MemoryChunk::kPointersFromHereAreInterestingMask,
2882                      &done);
2883 
2884   // Record the actual write.
2885   if (lr_status == kLRHasNotBeenSaved) {
2886     Push(padreg, lr);
2887   }
2888   CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
2889   if (lr_status == kLRHasNotBeenSaved) {
2890     Pop(lr, padreg);
2891   }
2892 
2893   Bind(&done);
2894 
2895   // Count number of write barriers in generated code.
2896   isolate()->counters()->write_barriers_static()->Increment();
2897   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
2898                    value);
2899 
2900   // When the debug-code flag is on, zap the clobbered registers to provoke
2901   // errors if they are relied upon afterwards.
2902   if (emit_debug_code()) {
2903     Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
2904     Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
2905   }
2906 }
2907 
2908 void TurboAssembler::Assert(Condition cond, AbortReason reason) {
2909   if (emit_debug_code()) {
2910     Check(cond, reason);
2911   }
2912 }
2913 
2914 void TurboAssembler::AssertUnreachable(AbortReason reason) {
2915   if (emit_debug_code()) Abort(reason);
2916 }
2917 
2918 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2919                                           Heap::RootListIndex index,
2920                                           AbortReason reason) {
2921   if (emit_debug_code()) {
2922     CompareRoot(reg, index);
2923     Check(eq, reason);
2924   }
2925 }
2926 
2927 void TurboAssembler::Check(Condition cond, AbortReason reason) {
2928   Label ok;
2929   B(cond, &ok);
2930   Abort(reason);
2931   // Will not return here.
2932   Bind(&ok);
2933 }
2934 
2935 void TurboAssembler::Abort(AbortReason reason) {
2936 #ifdef DEBUG
2937   RecordComment("Abort message: ");
2938   RecordComment(GetAbortReason(reason));
2939 #endif
2940 
2941   // Avoid emitting a call to the builtin if requested.
2942   if (trap_on_abort()) {
2943     Brk(0);
2944     return;
2945   }
2946 
2947   // We need some scratch registers for the MacroAssembler, so make sure we have
2948   // some. This is safe here because Abort never returns.
2949   RegList old_tmp_list = TmpList()->list();
2950   TmpList()->Combine(MacroAssembler::DefaultTmpList());
2951 
2952   if (should_abort_hard()) {
2953     // We don't care if we constructed a frame. Just pretend we did.
2954     FrameScope assume_frame(this, StackFrame::NONE);
2955     Mov(w0, static_cast<int>(reason));
2956     Call(ExternalReference::abort_with_reason());
2957     return;
2958   }
2959 
2960   // Avoid infinite recursion; Push contains some assertions that use Abort.
2961   HardAbortScope hard_aborts(this);
2962 
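  // The Abort builtin expects the abort reason as a Smi; it is passed in x1.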
2963   Mov(x1, Smi::FromInt(static_cast<int>(reason)));
2964 
2965   if (!has_frame_) {
2966     // We don't actually want to generate a pile of code for this, so just
2967     // claim there is a stack frame, without generating one.
2968     FrameScope scope(this, StackFrame::NONE);
2969     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2970   } else {
2971     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2972   }
2973 
2974   TmpList()->set_list(old_tmp_list);
2975 }
2976 
2977 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2978   Ldr(dst, NativeContextMemOperand());
2979   Ldr(dst, ContextMemOperand(dst, index));
2980 }
2981 
2982 
2983 // This is the main Printf implementation. All other Printf variants call
2984 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
2985 void MacroAssembler::PrintfNoPreserve(const char * format,
2986                                       const CPURegister& arg0,
2987                                       const CPURegister& arg1,
2988                                       const CPURegister& arg2,
2989                                       const CPURegister& arg3) {
2990   // We cannot handle a caller-saved stack pointer. It doesn't make much sense
2991   // in most cases anyway, so this restriction shouldn't be too serious.
2992   DCHECK(!kCallerSaved.IncludesAliasOf(sp));
2993 
2994   // The provided arguments, and their proper procedure-call standard registers.
2995   CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
2996   CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
2997 
2998   int arg_count = kPrintfMaxArgCount;
2999 
3000   // The PCS varargs registers for printf. Note that x0 is used for the printf
3001   // format string.
3002   static const CPURegList kPCSVarargs =
3003       CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
3004   static const CPURegList kPCSVarargsFP =
3005       CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1);
3006 
3007   // We can use caller-saved registers as scratch values, except for the
3008   // arguments and the PCS registers where they might need to go.
3009   CPURegList tmp_list = kCallerSaved;
3010   tmp_list.Remove(x0);      // Used to pass the format string.
3011   tmp_list.Remove(kPCSVarargs);
3012   tmp_list.Remove(arg0, arg1, arg2, arg3);
3013 
3014   CPURegList fp_tmp_list = kCallerSavedV;
3015   fp_tmp_list.Remove(kPCSVarargsFP);
3016   fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
3017 
3018   // Override the MacroAssembler's scratch register list. The lists will be
3019   // reset automatically at the end of the UseScratchRegisterScope.
3020   UseScratchRegisterScope temps(this);
3021   TmpList()->set_list(tmp_list.list());
3022   FPTmpList()->set_list(fp_tmp_list.list());
3023 
3024   // Copies of the printf vararg registers that we can pop from.
3025   CPURegList pcs_varargs = kPCSVarargs;
3026   CPURegList pcs_varargs_fp = kPCSVarargsFP;
3027 
3028   // Place the arguments. There are lots of clever tricks and optimizations we
3029   // could use here, but Printf is a debug tool so instead we just try to keep
3030   // it simple: Move each input that isn't already in the right place to a
3031   // scratch register, then move everything back.
3032   for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
3033     // Work out the proper PCS register for this argument.
3034     if (args[i].IsRegister()) {
3035       pcs[i] = pcs_varargs.PopLowestIndex().X();
3036       // We might only need a W register here. We need to know the size of the
3037       // argument so we can properly encode it for the simulator call.
3038       if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
3039     } else if (args[i].IsVRegister()) {
3040       // In C, floats are always promoted to doubles for varargs calls.
3041       pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
3042     } else {
3043       DCHECK(args[i].IsNone());
3044       arg_count = i;
3045       break;
3046     }
3047 
3048     // If the argument is already in the right place, leave it where it is.
3049     if (args[i].Aliases(pcs[i])) continue;
3050 
3051     // Otherwise, if the argument is in a PCS argument register, allocate an
3052     // appropriate scratch register and then move it out of the way.
3053     if (kPCSVarargs.IncludesAliasOf(args[i]) ||
3054         kPCSVarargsFP.IncludesAliasOf(args[i])) {
3055       if (args[i].IsRegister()) {
3056         Register old_arg = args[i].Reg();
3057         Register new_arg = temps.AcquireSameSizeAs(old_arg);
3058         Mov(new_arg, old_arg);
3059         args[i] = new_arg;
3060       } else {
3061         VRegister old_arg = args[i].VReg();
3062         VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
3063         Fmov(new_arg, old_arg);
3064         args[i] = new_arg;
3065       }
3066     }
3067   }
3068 
3069   // Do a second pass to move values into their final positions and perform any
3070   // conversions that may be required.
3071   for (int i = 0; i < arg_count; i++) {
3072     DCHECK(pcs[i].type() == args[i].type());
3073     if (pcs[i].IsRegister()) {
3074       Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
3075     } else {
3076       DCHECK(pcs[i].IsVRegister());
3077       if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
3078         Fmov(pcs[i].VReg(), args[i].VReg());
3079       } else {
3080         Fcvt(pcs[i].VReg(), args[i].VReg());
3081       }
3082     }
3083   }
3084 
3085   // Load the format string into x0, as per the procedure-call standard.
3086   //
3087   // To make the code as portable as possible, the format string is encoded
3088   // directly in the instruction stream. It might be cleaner to encode it in a
3089   // literal pool, but since Printf is usually used for debugging, it is
3090   // beneficial for it to be minimally dependent on other features.
3091   Label format_address;
3092   Adr(x0, &format_address);
3093 
3094   // Emit the format string directly in the instruction stream.
3095   { BlockPoolsScope scope(this);
3096     Label after_data;
3097     B(&after_data);
3098     Bind(&format_address);
3099     EmitStringData(format);
3100     Unreachable();
3101     Bind(&after_data);
3102   }
3103 
3104   CallPrintf(arg_count, pcs);
3105 }
3106 
3107 void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
3108 // A call to printf needs special handling for the simulator, since the system
3109 // printf function will use a different instruction set and the procedure-call
3110 // standard will not be compatible.
3111 #ifdef USE_SIMULATOR
3112   {
3113     InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
3114     hlt(kImmExceptionIsPrintf);
3115     dc32(arg_count);          // kPrintfArgCountOffset
3116 
3117     // Determine the argument pattern.
3118     uint32_t arg_pattern_list = 0;
3119     for (int i = 0; i < arg_count; i++) {
3120       uint32_t arg_pattern;
3121       if (args[i].IsRegister()) {
3122         arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
3123       } else {
3124         DCHECK(args[i].Is64Bits());
3125         arg_pattern = kPrintfArgD;
3126       }
3127       DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
3128       arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
3129     }
3130     dc32(arg_pattern_list);   // kPrintfArgPatternListOffset
3131   }
3132 #else
3133   Call(ExternalReference::printf_function());
3134 #endif
3135 }
3136 
3137 
3138 void MacroAssembler::Printf(const char * format,
3139                             CPURegister arg0,
3140                             CPURegister arg1,
3141                             CPURegister arg2,
3142                             CPURegister arg3) {
3143   // Printf is expected to preserve all registers, so make sure that none are
3144   // available as scratch registers until we've preserved them.
3145   RegList old_tmp_list = TmpList()->list();
3146   RegList old_fp_tmp_list = FPTmpList()->list();
3147   TmpList()->set_list(0);
3148   FPTmpList()->set_list(0);
3149 
3150   // Preserve all caller-saved registers as well as NZCV.
3151   // PushCPURegList asserts that the size of each list is a multiple of 16
3152   // bytes.
3153   PushCPURegList(kCallerSaved);
3154   PushCPURegList(kCallerSavedV);
3155 
3156   // We can use caller-saved registers as scratch values (except for argN).
3157   CPURegList tmp_list = kCallerSaved;
3158   CPURegList fp_tmp_list = kCallerSavedV;
3159   tmp_list.Remove(arg0, arg1, arg2, arg3);
3160   fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
3161   TmpList()->set_list(tmp_list.list());
3162   FPTmpList()->set_list(fp_tmp_list.list());
3163 
3164   { UseScratchRegisterScope temps(this);
3165     // If any of the arguments are the current stack pointer, allocate a new
3166     // register for them, and adjust the value to compensate for pushing the
3167     // caller-saved registers.
3168     bool arg0_sp = sp.Aliases(arg0);
3169     bool arg1_sp = sp.Aliases(arg1);
3170     bool arg2_sp = sp.Aliases(arg2);
3171     bool arg3_sp = sp.Aliases(arg3);
3172     if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
3173       // Allocate a register to hold the original stack pointer value, to pass
3174       // to PrintfNoPreserve as an argument.
3175       Register arg_sp = temps.AcquireX();
3176       Add(arg_sp, sp,
3177           kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
3178       if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
3179       if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
3180       if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
3181       if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
3182     }
3183 
3184     // Preserve NZCV.
3185     { UseScratchRegisterScope temps(this);
3186       Register tmp = temps.AcquireX();
3187       Mrs(tmp, NZCV);
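      // xzr is pushed alongside the flags to keep sp 16-byte aligned.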
3188       Push(tmp, xzr);
3189     }
3190 
3191     PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
3192 
3193     // Restore NZCV.
3194     { UseScratchRegisterScope temps(this);
3195       Register tmp = temps.AcquireX();
3196       Pop(xzr, tmp);
3197       Msr(NZCV, tmp);
3198     }
3199   }
3200 
3201   PopCPURegList(kCallerSavedV);
3202   PopCPURegList(kCallerSaved);
3203 
3204   TmpList()->set_list(old_tmp_list);
3205   FPTmpList()->set_list(old_fp_tmp_list);
3206 }
3207 
3208 UseScratchRegisterScope::~UseScratchRegisterScope() {
3209   available_->set_list(old_available_);
3210   availablefp_->set_list(old_availablefp_);
3211 }
3212 
3213 
3214 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
3215   int code = AcquireNextAvailable(available_).code();
3216   return Register::Create(code, reg.SizeInBits());
3217 }
3218 
3219 VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
3220   int code = AcquireNextAvailable(availablefp_).code();
3221   return VRegister::Create(code, reg.SizeInBits());
3222 }
3223 
3224 
3225 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
3226     CPURegList* available) {
3227   CHECK(!available->IsEmpty());
3228   CPURegister result = available->PopLowestIndex();
3229   DCHECK(!AreAliased(result, xzr, sp));
3230   return result;
3231 }
3232 
3233 
3234 MemOperand ContextMemOperand(Register context, int index) {
3235   return MemOperand(context, Context::SlotOffset(index));
3236 }
3237 
3238 MemOperand NativeContextMemOperand() {
3239   return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
3240 }
3241 
3242 #define __ masm->
3243 
3244 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
3245                               const Label* smi_check) {
3246   Assembler::BlockPoolsScope scope(masm);
3247   if (reg.IsValid()) {
3248     DCHECK(smi_check->is_bound());
3249     DCHECK(reg.Is64Bits());
3250 
3251     // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
3252     // 'check' in the other bits. The possible offset is limited in that we
3253     // use BitField to pack the data, and the underlying data type is a
3254     // uint32_t.
3255     uint32_t delta =
3256         static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
3257     __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
3258   } else {
3259     DCHECK(!smi_check->is_bound());
3260 
3261     // An offset of 0 indicates that there is no patch site.
3262     __ InlineData(0);
3263   }
3264 }
3265 
3266 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
3267     : reg_(NoReg), smi_check_delta_(0), smi_check_(nullptr) {
3268   InstructionSequence* inline_data = InstructionSequence::At(info);
3269   DCHECK(inline_data->IsInlineData());
3270   if (inline_data->IsInlineData()) {
3271     uint64_t payload = inline_data->InlineData();
3272     // We use BitField to decode the payload, and BitField can only handle
3273     // 32-bit values.
3274     DCHECK(is_uint32(payload));
3275     if (payload != 0) {
3276       uint32_t payload32 = static_cast<uint32_t>(payload);
3277       int reg_code = RegisterBits::decode(payload32);
3278       reg_ = Register::XRegFromCode(reg_code);
3279       smi_check_delta_ = DeltaBits::decode(payload32);
3280       DCHECK_NE(0, smi_check_delta_);
3281       smi_check_ = inline_data->preceding(smi_check_delta_);
3282     }
3283   }
3284 }
3285 
3286 void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
3287   // We can use adr to load a pc relative location.
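  // pc_offset() is the number of bytes emitted so far, so an offset of
  // -pc_offset() reaches back to the start of the code object.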
3288   adr(rd, -pc_offset());
3289 }
3290 
3291 void TurboAssembler::ResetSpeculationPoisonRegister() {
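  // An all-ones poison value acts as a no-op mask, leaving subsequently
  // poisoned values unchanged.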
3292   Mov(kSpeculationPoisonRegister, -1);
3293 }
3294 
3295 #undef __
3296 
3297 
3298 }  // namespace internal
3299 }  // namespace v8
3300 
3301 #endif  // V8_TARGET_ARCH_ARM64
3302