1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h>  // For LONG_MIN, LONG_MAX.
6 
7 #if V8_TARGET_ARCH_ARM
8 
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/codegen/assembler-inl.h"
13 #include "src/codegen/callable.h"
14 #include "src/codegen/code-factory.h"
15 #include "src/codegen/external-reference-table.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/debug/debug.h"
19 #include "src/execution/frames-inl.h"
20 #include "src/heap/memory-chunk.h"
21 #include "src/init/bootstrapper.h"
22 #include "src/logging/counters.h"
23 #include "src/numbers/double.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/runtime/runtime.h"
26 #include "src/snapshot/embedded/embedded-data.h"
27 #include "src/snapshot/snapshot.h"
28 #include "src/wasm/wasm-code-manager.h"
29 
30 // Satisfy cpplint check, but don't include platform-specific header. It is
31 // included recursively via macro-assembler.h.
32 #if 0
33 #include "src/codegen/arm/macro-assembler-arm.h"
34 #endif
35 
36 namespace v8 {
37 namespace internal {
38 
39 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
40                                                     Register exclusion1,
41                                                     Register exclusion2,
42                                                     Register exclusion3) const {
43   int bytes = 0;
44   RegList exclusions = 0;
45   if (exclusion1 != no_reg) {
46     exclusions |= exclusion1.bit();
47     if (exclusion2 != no_reg) {
48       exclusions |= exclusion2.bit();
49       if (exclusion3 != no_reg) {
50         exclusions |= exclusion3.bit();
51       }
52     }
53   }
54 
55   RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
56 
57   bytes += NumRegs(list) * kPointerSize;
58 
59   if (fp_mode == kSaveFPRegs) {
60     bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
61   }
62 
63   return bytes;
64 }
65 
66 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
67                                     Register exclusion2, Register exclusion3) {
68   int bytes = 0;
69   RegList exclusions = 0;
70   if (exclusion1 != no_reg) {
71     exclusions |= exclusion1.bit();
72     if (exclusion2 != no_reg) {
73       exclusions |= exclusion2.bit();
74       if (exclusion3 != no_reg) {
75         exclusions |= exclusion3.bit();
76       }
77     }
78   }
79 
80   RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
81   stm(db_w, sp, list);
82 
83   bytes += NumRegs(list) * kPointerSize;
84 
85   if (fp_mode == kSaveFPRegs) {
86     SaveFPRegs(sp, lr);
87     bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
88   }
89 
90   return bytes;
91 }
92 
93 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
94                                    Register exclusion2, Register exclusion3) {
95   int bytes = 0;
96   if (fp_mode == kSaveFPRegs) {
97     RestoreFPRegs(sp, lr);
98     bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
99   }
100 
101   RegList exclusions = 0;
102   if (exclusion1 != no_reg) {
103     exclusions |= exclusion1.bit();
104     if (exclusion2 != no_reg) {
105       exclusions |= exclusion2.bit();
106       if (exclusion3 != no_reg) {
107         exclusions |= exclusion3.bit();
108       }
109     }
110   }
111 
112   RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
113   ldm(ia_w, sp, list);
114 
115   bytes += NumRegs(list) * kPointerSize;
116 
117   return bytes;
118 }
119 
120 void TurboAssembler::LoadFromConstantsTable(Register destination,
121                                             int constant_index) {
122   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
123 
124   const uint32_t offset =
125       FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
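  // The constants table is a FixedArray, so the element lives at kHeaderSize
  // plus constant_index * kPointerSize; subtracting kHeapObjectTag compensates
  // for the tagged pointer loaded into 'destination' being used as the base.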
126 
127   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
128   ldr(destination, MemOperand(destination, offset));
129 }
130 
131 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
132   ldr(destination, MemOperand(kRootRegister, offset));
133 }
134 
135 void TurboAssembler::LoadRootRegisterOffset(Register destination,
136                                             intptr_t offset) {
137   if (offset == 0) {
138     Move(destination, kRootRegister);
139   } else {
140     add(destination, kRootRegister, Operand(offset));
141   }
142 }
143 
144 void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
145 
146 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
147                           Condition cond) {
148   mov(pc, Operand(target, rmode), LeaveCC, cond);
149 }
150 
151 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
152                           Condition cond) {
153   DCHECK(!RelocInfo::IsCodeTarget(rmode));
154   Jump(static_cast<intptr_t>(target), rmode, cond);
155 }
156 
157 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
158                           Condition cond) {
159   DCHECK(RelocInfo::IsCodeTarget(rmode));
160   DCHECK_IMPLIES(options().isolate_independent_code,
161                  Builtins::IsIsolateIndependentBuiltin(*code));
162   DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
163                  Builtins::IsIsolateIndependentBuiltin(*code));
164 
165   int builtin_index = Builtins::kNoBuiltinId;
166   bool target_is_builtin =
167       isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
168 
169   if (options().use_pc_relative_calls_and_jumps && target_is_builtin) {
170     int32_t code_target_index = AddCodeTarget(code);
171     b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
172     return;
173   } else if (root_array_available_ && options().isolate_independent_code) {
174     // This branch is taken only for specific cctests, where we force isolate
175     // creation at runtime. At this point, Code space isn't restricted to a
176     // size s.t. pc-relative calls may be used.
177     UseScratchRegisterScope temps(this);
178     Register scratch = temps.Acquire();
179     int offset = IsolateData::builtin_entry_slot_offset(
180         static_cast<Builtins::Name>(code->builtin_index()));
181     ldr(scratch, MemOperand(kRootRegister, offset));
182     Jump(scratch, cond);
183     return;
184   } else if (options().inline_offheap_trampolines && target_is_builtin) {
185     // Inline the trampoline.
186     RecordCommentForOffHeapTrampoline(builtin_index);
187     EmbeddedData d = EmbeddedData::FromBlob();
188     Address entry = d.InstructionStartOfBuiltin(builtin_index);
189     // Use ip directly instead of using UseScratchRegisterScope, as we do not
190     // preserve scratch registers across calls.
191     mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
192     Jump(ip, cond);
193     return;
194   }
195 
196   // 'code' is always generated ARM code, never THUMB code
197   Jump(static_cast<intptr_t>(code.address()), rmode, cond);
198 }
199 
200 void TurboAssembler::Jump(const ExternalReference& reference) {
201   UseScratchRegisterScope temps(this);
202   Register scratch = temps.Acquire();
203   Move(scratch, reference);
204   Jump(scratch);
205 }
206 
207 void TurboAssembler::Call(Register target, Condition cond) {
208   // Block constant pool for the call instruction sequence.
209   BlockConstPoolScope block_const_pool(this);
210   blx(target, cond);
211 }
212 
213 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
214                           TargetAddressStorageMode mode,
215                           bool check_constant_pool) {
216   // Check if we have to emit the constant pool before we block it.
217   if (check_constant_pool) MaybeCheckConstPool();
218   // Block constant pool for the call instruction sequence.
219   BlockConstPoolScope block_const_pool(this);
220 
221   bool old_predictable_code_size = predictable_code_size();
222   if (mode == NEVER_INLINE_TARGET_ADDRESS) {
223     set_predictable_code_size(true);
224   }
225 
226   // Use ip directly instead of using UseScratchRegisterScope, as we do not
227   // preserve scratch registers across calls.
228 
229   // Call sequence on V7 or later may be:
230   //  movw  ip, #... @ call address low 16
231   //  movt  ip, #... @ call address high 16
232   //  blx   ip
233   //                      @ return address
234   // Or for pre-V7 or values that may be back-patched
235   // to avoid ICache flushes:
236   //  ldr   ip, [pc, #...] @ call address
237   //  blx   ip
238   //                      @ return address
239 
240   mov(ip, Operand(target, rmode));
241   blx(ip, cond);
242 
243   if (mode == NEVER_INLINE_TARGET_ADDRESS) {
244     set_predictable_code_size(old_predictable_code_size);
245   }
246 }
247 
248 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
249                           Condition cond, TargetAddressStorageMode mode,
250                           bool check_constant_pool) {
251   DCHECK(RelocInfo::IsCodeTarget(rmode));
252   DCHECK_IMPLIES(options().isolate_independent_code,
253                  Builtins::IsIsolateIndependentBuiltin(*code));
254   DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
255                  Builtins::IsIsolateIndependentBuiltin(*code));
256 
257   int builtin_index = Builtins::kNoBuiltinId;
258   bool target_is_builtin =
259       isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
260 
261   if (target_is_builtin && options().use_pc_relative_calls_and_jumps) {
262     int32_t code_target_index = AddCodeTarget(code);
263     bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
264     return;
265   } else if (root_array_available_ && options().isolate_independent_code) {
266     // This branch is taken only for specific cctests, where we force isolate
267     // creation at runtime. At this point, Code space isn't restricted to a
268     // size s.t. pc-relative calls may be used.
269     int offset = IsolateData::builtin_entry_slot_offset(
270         static_cast<Builtins::Name>(code->builtin_index()));
271     ldr(ip, MemOperand(kRootRegister, offset));
272     Call(ip, cond);
273     return;
274   } else if (target_is_builtin && options().inline_offheap_trampolines) {
275     // Inline the trampoline.
276     CallBuiltin(builtin_index);
277     return;
278   }
279 
280   // 'code' is always generated ARM code, never THUMB code
281   DCHECK(code->IsExecutable());
282   Call(code.address(), rmode, cond, mode);
283 }
284 
285 void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
286   STATIC_ASSERT(kSystemPointerSize == 4);
287   STATIC_ASSERT(kSmiShiftSize == 0);
288   STATIC_ASSERT(kSmiTagSize == 1);
289   STATIC_ASSERT(kSmiTag == 0);
290 
291   // The builtin_index register contains the builtin index as a Smi.
292   // Untagging is folded into the indexing operand below.
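  // With kSmiTagSize == 1 and kSystemPointerSizeLog2 == 2 (asserted above), a
  // single left shift by 1 turns the Smi encoding (index << 1) into
  // index * kSystemPointerSize, the byte offset into the entry table.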
293   mov(builtin_index,
294       Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
295   add(builtin_index, builtin_index,
296       Operand(IsolateData::builtin_entry_table_offset()));
297   ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
298 }
299 
300 void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
301   LoadEntryFromBuiltinIndex(builtin_index);
302   Call(builtin_index);
303 }
304 
305 void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
306   DCHECK(Builtins::IsBuiltinId(builtin_index));
307   RecordCommentForOffHeapTrampoline(builtin_index);
308   EmbeddedData d = EmbeddedData::FromBlob();
309   Address entry = d.InstructionStartOfBuiltin(builtin_index);
310   // Use ip directly instead of using UseScratchRegisterScope, as we do not
311   // preserve scratch registers across calls.
312   mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
313   Call(ip, cond);
314 }
315 
316 void TurboAssembler::LoadCodeObjectEntry(Register destination,
317                                          Register code_object) {
318   // Code objects are called differently depending on whether we are generating
319   // builtin code (which will later be embedded into the binary) or compiling
320   // user JS code at runtime.
321   // * Builtin code runs in --jitless mode and thus must not call into on-heap
322   //   Code targets. Instead, we dispatch through the builtins entry table.
323   // * Codegen at runtime does not have this restriction and we can use the
324   //   shorter, branchless instruction sequence. The assumption here is that
325   //   targets are usually generated code and not builtin Code objects.
326 
327   if (options().isolate_independent_code) {
328     DCHECK(root_array_available());
329     Label if_code_is_off_heap, out;
330 
331     UseScratchRegisterScope temps(this);
332     Register scratch = temps.Acquire();
333 
334     DCHECK(!AreAliased(destination, scratch));
335     DCHECK(!AreAliased(code_object, scratch));
336 
337     // Check whether the Code object is an off-heap trampoline. If so, call its
338     // (off-heap) entry point directly without going through the (on-heap)
339     // trampoline.  Otherwise, just call the Code object as always.
340     ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
341     tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
342     b(ne, &if_code_is_off_heap);
343 
344     // Not an off-heap trampoline, the entry point is at
345     // Code::raw_instruction_start().
346     add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
347     jmp(&out);
348 
349     // An off-heap trampoline, the entry point is loaded from the builtin entry
350     // table.
351     bind(&if_code_is_off_heap);
352     ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
353     lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
354     add(destination, destination, kRootRegister);
355     ldr(destination,
356         MemOperand(destination, IsolateData::builtin_entry_table_offset()));
357 
358     bind(&out);
359   } else {
360     add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
361   }
362 }
363 
364 void TurboAssembler::CallCodeObject(Register code_object) {
365   LoadCodeObjectEntry(code_object, code_object);
366   Call(code_object);
367 }
368 
369 void TurboAssembler::JumpCodeObject(Register code_object) {
370   LoadCodeObjectEntry(code_object, code_object);
371   Jump(code_object);
372 }
373 
374 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
375   // This generates the final instruction sequence for calls to C functions
376   // once an exit frame has been constructed.
377   //
378   // Note that this assumes the caller code (i.e. the Code object currently
379   // being generated) is immovable or that the callee function cannot trigger
380   // GC, since the callee function will return to it.
381 
382   // Compute the return address in lr to return to after the jump below. The pc
383   // is already at '+ 8' from the current instruction, but the return is after
384   // three instructions, so add another 4 to pc to get the return address.
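  // For example, if the add below sits at address A, reading pc yields A + 8,
  // and the return address after the add/str/blx sequence is A + 12, so
  // lr = (A + 8) + 4 points exactly at the instruction following the call.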
385   Assembler::BlockConstPoolScope block_const_pool(this);
386   add(lr, pc, Operand(4));
387   str(lr, MemOperand(sp));
388   Call(target);
389 }
390 
391 void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
392 
393 void TurboAssembler::Drop(int count, Condition cond) {
394   if (count > 0) {
395     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
396   }
397 }
398 
399 void TurboAssembler::Drop(Register count, Condition cond) {
400   add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
401 }
402 
403 void TurboAssembler::Ret(int drop, Condition cond) {
404   Drop(drop, cond);
405   Ret(cond);
406 }
407 
408 void TurboAssembler::Call(Label* target) { bl(target); }
409 
410 void TurboAssembler::Push(Handle<HeapObject> handle) {
411   UseScratchRegisterScope temps(this);
412   Register scratch = temps.Acquire();
413   mov(scratch, Operand(handle));
414   push(scratch);
415 }
416 
417 void TurboAssembler::Push(Smi smi) {
418   UseScratchRegisterScope temps(this);
419   Register scratch = temps.Acquire();
420   mov(scratch, Operand(smi));
421   push(scratch);
422 }
423 
424 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
425                                PushArrayOrder order) {
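  // Pushes 'size' elements of 'array'. In kReverse order the counter walks
  // from 0 upwards (array[0] is pushed first); in the default order it walks
  // from size - 1 down to 0 (array[size - 1] is pushed first).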
426   UseScratchRegisterScope temps(this);
427   Register counter = scratch;
428   Register tmp = temps.Acquire();
429   DCHECK(!AreAliased(array, size, counter, tmp));
430   Label loop, entry;
431   if (order == PushArrayOrder::kReverse) {
432     mov(counter, Operand(0));
433     b(&entry);
434     bind(&loop);
435     ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
436     push(tmp);
437     add(counter, counter, Operand(1));
438     bind(&entry);
439     cmp(counter, size);
440     b(lt, &loop);
441   } else {
442     mov(counter, size);
443     b(&entry);
444     bind(&loop);
445     ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
446     push(tmp);
447     bind(&entry);
448     sub(counter, counter, Operand(1), SetCC);
449     b(ge, &loop);
450   }
451 }
452 
453 void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
454 
455 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
456   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
457   // non-isolate-independent code. In many cases it might be cheaper than
458   // embedding the relocatable value.
459   if (root_array_available_ && options().isolate_independent_code) {
460     IndirectLoadConstant(dst, value);
461     return;
462   }
463   mov(dst, Operand(value));
464 }
465 
466 void TurboAssembler::Move(Register dst, ExternalReference reference) {
467   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
468   // non-isolate-independent code. In many cases it might be cheaper than
469   // embedding the relocatable value.
470   if (root_array_available_ && options().isolate_independent_code) {
471     IndirectLoadExternalReference(dst, reference);
472     return;
473   }
474   mov(dst, Operand(reference));
475 }
476 
477 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
478   if (dst != src) {
479     mov(dst, src, LeaveCC, cond);
480   }
481 }
482 
483 void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
484                           Condition cond) {
485   if (dst != src) {
486     vmov(dst, src, cond);
487   }
488 }
489 
490 void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
491                           Condition cond) {
492   if (dst != src) {
493     vmov(dst, src, cond);
494   }
495 }
496 
497 void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
498   if (dst != src) {
499     vmov(dst, src);
500   }
501 }
502 
503 void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
504                               Register src1) {
505   DCHECK_NE(dst0, dst1);
506   if (dst0 != src1) {
507     Move(dst0, src0);
508     Move(dst1, src1);
509   } else if (dst1 != src0) {
510     // Swap the order of the moves to resolve the overlap.
511     Move(dst1, src1);
512     Move(dst0, src0);
513   } else {
514     // Worst-case scenario, this is a swap.
515     Swap(dst0, src0);
516   }
517 }
518 
519 void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
520   DCHECK(srcdst0 != srcdst1);
521   UseScratchRegisterScope temps(this);
522   Register scratch = temps.Acquire();
523   mov(scratch, srcdst0);
524   mov(srcdst0, srcdst1);
525   mov(srcdst1, scratch);
526 }
527 
528 void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
529   DCHECK(srcdst0 != srcdst1);
530   DCHECK(VfpRegisterIsAvailable(srcdst0));
531   DCHECK(VfpRegisterIsAvailable(srcdst1));
532 
533   if (CpuFeatures::IsSupported(NEON)) {
534     vswp(srcdst0, srcdst1);
535   } else {
536     UseScratchRegisterScope temps(this);
537     DwVfpRegister scratch = temps.AcquireD();
538     vmov(scratch, srcdst0);
539     vmov(srcdst0, srcdst1);
540     vmov(srcdst1, scratch);
541   }
542 }
543 
544 void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
545   DCHECK(srcdst0 != srcdst1);
546   vswp(srcdst0, srcdst1);
547 }
548 
549 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
550                          Register srcA, Condition cond) {
551   if (CpuFeatures::IsSupported(ARMv7)) {
552     CpuFeatureScope scope(this, ARMv7);
553     mls(dst, src1, src2, srcA, cond);
554   } else {
555     UseScratchRegisterScope temps(this);
556     Register scratch = temps.Acquire();
557     DCHECK(srcA != scratch);
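    // Without the ARMv7 mls instruction, compute dst = srcA - (src1 * src2)
    // with a separate multiply followed by a subtract.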
558     mul(scratch, src1, src2, LeaveCC, cond);
559     sub(dst, srcA, scratch, LeaveCC, cond);
560   }
561 }
562 
563 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
564                          Condition cond) {
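  // An AND with an immediate of the form 2^n - 1 keeps only the low n bits,
  // so when such an immediate would not fit in a single instruction, ubfx on
  // ARMv7 extracts the same bit field in one instruction.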
565   if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
566       src2.immediate() == 0) {
567     mov(dst, Operand::Zero(), LeaveCC, cond);
568   } else if (!(src2.InstructionsRequired(this) == 1) &&
569              !src2.MustOutputRelocInfo(this) &&
570              CpuFeatures::IsSupported(ARMv7) &&
571              base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
572     CpuFeatureScope scope(this, ARMv7);
573     ubfx(dst, src1, 0,
574          base::bits::WhichPowerOfTwo(static_cast<uint32_t>(src2.immediate()) +
575                                      1),
576          cond);
577   } else {
578     and_(dst, src1, src2, LeaveCC, cond);
579   }
580 }
581 
582 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
583                           Condition cond) {
584   DCHECK_LT(lsb, 32);
585   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
586     int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
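    // 'mask' selects bits [lsb, lsb + width); the AND followed by a logical
    // shift right moves the field down to bit 0, emulating ubfx.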
587     and_(dst, src1, Operand(mask), LeaveCC, cond);
588     if (lsb != 0) {
589       mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
590     }
591   } else {
592     CpuFeatureScope scope(this, ARMv7);
593     ubfx(dst, src1, lsb, width, cond);
594   }
595 }
596 
597 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
598                           Condition cond) {
599   DCHECK_LT(lsb, 32);
600   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
601     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
602     and_(dst, src1, Operand(mask), LeaveCC, cond);
603     int shift_up = 32 - lsb - width;
604     int shift_down = lsb + shift_up;
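    // Shifting the masked field up to the top of the register and then
    // arithmetic-shifting it back down sign-extends it, emulating sbfx.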
605     if (shift_up != 0) {
606       mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
607     }
608     if (shift_down != 0) {
609       mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
610     }
611   } else {
612     CpuFeatureScope scope(this, ARMv7);
613     sbfx(dst, src1, lsb, width, cond);
614   }
615 }
616 
617 void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
618                          Condition cond) {
619   DCHECK_LT(lsb, 32);
620   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
621     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
622     bic(dst, src, Operand(mask));
623   } else {
624     CpuFeatureScope scope(this, ARMv7);
625     Move(dst, src, cond);
626     bfc(dst, lsb, width, cond);
627   }
628 }
629 
630 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
631                               Condition cond) {
632   ldr(destination,
633       MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
634 }
635 
636 void MacroAssembler::RecordWriteField(Register object, int offset,
637                                       Register value,
638                                       LinkRegisterStatus lr_status,
639                                       SaveFPRegsMode save_fp,
640                                       RememberedSetAction remembered_set_action,
641                                       SmiCheck smi_check) {
642   // First, check if a write barrier is even needed. The tests below
643   // catch stores of Smis.
644   Label done;
645 
646   // Skip barrier if writing a smi.
647   if (smi_check == INLINE_SMI_CHECK) {
648     JumpIfSmi(value, &done);
649   }
650 
651   // Although the object register is tagged, the offset is relative to the start
652   // of the object, so the offset must be a multiple of kPointerSize.
653   DCHECK(IsAligned(offset, kPointerSize));
654 
655   if (emit_debug_code()) {
656     Label ok;
657     UseScratchRegisterScope temps(this);
658     Register scratch = temps.Acquire();
659     add(scratch, object, Operand(offset - kHeapObjectTag));
660     tst(scratch, Operand(kPointerSize - 1));
661     b(eq, &ok);
662     stop();
663     bind(&ok);
664   }
665 
666   RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
667               save_fp, remembered_set_action, OMIT_SMI_CHECK);
668 
669   bind(&done);
670 }
671 
672 void TurboAssembler::SaveRegisters(RegList registers) {
673   DCHECK_GT(NumRegs(registers), 0);
674   RegList regs = 0;
675   for (int i = 0; i < Register::kNumRegisters; ++i) {
676     if ((registers >> i) & 1u) {
677       regs |= Register::from_code(i).bit();
678     }
679   }
680 
681   stm(db_w, sp, regs);
682 }
683 
684 void TurboAssembler::RestoreRegisters(RegList registers) {
685   DCHECK_GT(NumRegs(registers), 0);
686   RegList regs = 0;
687   for (int i = 0; i < Register::kNumRegisters; ++i) {
688     if ((registers >> i) & 1u) {
689       regs |= Register::from_code(i).bit();
690     }
691   }
692   ldm(ia_w, sp, regs);
693 }
694 
695 void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
696                                              SaveFPRegsMode fp_mode) {
697   EphemeronKeyBarrierDescriptor descriptor;
698   RegList registers = descriptor.allocatable_registers();
699 
700   SaveRegisters(registers);
701 
702   Register object_parameter(
703       descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
704   Register slot_parameter(descriptor.GetRegisterParameter(
705       EphemeronKeyBarrierDescriptor::kSlotAddress));
706   Register fp_mode_parameter(
707       descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
708 
709   MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
710   Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
711   Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
712        RelocInfo::CODE_TARGET);
713   RestoreRegisters(registers);
714 }
715 
716 void TurboAssembler::CallRecordWriteStub(
717     Register object, Operand offset, RememberedSetAction remembered_set_action,
718     SaveFPRegsMode fp_mode) {
719   CallRecordWriteStub(
720       object, offset, remembered_set_action, fp_mode,
721       isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
722       kNullAddress);
723 }
724 
725 void TurboAssembler::CallRecordWriteStub(
726     Register object, Operand offset, RememberedSetAction remembered_set_action,
727     SaveFPRegsMode fp_mode, Address wasm_target) {
728   CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
729                       Handle<Code>::null(), wasm_target);
730 }
731 
732 void TurboAssembler::CallRecordWriteStub(
733     Register object, Operand offset, RememberedSetAction remembered_set_action,
734     SaveFPRegsMode fp_mode, Handle<Code> code_target, Address wasm_target) {
735   DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
736   // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
737   // i.e. always emit the remembered set and save FP registers in RecordWriteStub. If
738   // large performance regression is observed, we should use these values to
739   // avoid unnecessary work.
740 
741   RecordWriteDescriptor descriptor;
742   RegList registers = descriptor.allocatable_registers();
743 
744   SaveRegisters(registers);
745 
746   Register object_parameter(
747       descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
748   Register slot_parameter(
749       descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
750   Register remembered_set_parameter(
751       descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
752   Register fp_mode_parameter(
753       descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
754 
755   MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
756 
757   Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
758   Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
759   if (code_target.is_null()) {
760     Call(wasm_target, RelocInfo::WASM_STUB_CALL);
761   } else {
762     Call(code_target, RelocInfo::CODE_TARGET);
763   }
764 
765   RestoreRegisters(registers);
766 }
767 
768 void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
769                                        Register object, Operand offset) {
770   DCHECK_NE(dst_object, dst_slot);
771   DCHECK(offset.IsRegister() || offset.IsImmediate());
772   // If `offset` is a register, it cannot overlap with `object`.
773   DCHECK_IMPLIES(offset.IsRegister(), offset.rm() != object);
774 
775   // If the slot register does not overlap with the object register, we can
776   // overwrite it.
777   if (dst_slot != object) {
778     add(dst_slot, object, offset);
779     Move(dst_object, object);
780     return;
781   }
782 
783   DCHECK_EQ(dst_slot, object);
784 
785   // If the destination object register does not overlap with the offset
786   // register, we can overwrite it.
787   if (!offset.IsRegister() || (offset.rm() != dst_object)) {
788     Move(dst_object, dst_slot);
789     add(dst_slot, dst_slot, offset);
790     return;
791   }
792 
793   DCHECK_EQ(dst_object, offset.rm());
794 
795   // We only have `dst_slot` and `dst_object` left as distinct registers so we
796   // have to swap them. We write this as an add+sub sequence to avoid using a
797   // scratch register.
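  // Here dst_slot holds 'object' and dst_object holds the offset register, so
  // the add leaves object + offset in dst_slot, and the sub then recovers the
  // original object into dst_object: (object + offset) - offset == object.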
798   add(dst_slot, dst_slot, dst_object);
799   sub(dst_object, dst_slot, dst_object);
800 }
801 
802 // The register 'object' contains a heap object pointer. The heap object tag is
803 // shifted away. A scratch register also needs to be available.
804 void MacroAssembler::RecordWrite(Register object, Operand offset,
805                                  Register value, LinkRegisterStatus lr_status,
806                                  SaveFPRegsMode fp_mode,
807                                  RememberedSetAction remembered_set_action,
808                                  SmiCheck smi_check) {
809   DCHECK_NE(object, value);
810   if (emit_debug_code()) {
811     {
812       UseScratchRegisterScope temps(this);
813       Register scratch = temps.Acquire();
814       add(scratch, object, offset);
815       ldr(scratch, MemOperand(scratch));
816       cmp(scratch, value);
817     }
818     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
819   }
820 
821   if ((remembered_set_action == OMIT_REMEMBERED_SET &&
822        !FLAG_incremental_marking) ||
823       FLAG_disable_write_barriers) {
824     return;
825   }
826 
827   // First, check if a write barrier is even needed. The tests below
828   // catch stores of smis and stores into the young generation.
829   Label done;
830 
831   if (smi_check == INLINE_SMI_CHECK) {
832     JumpIfSmi(value, &done);
833   }
834 
835   CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
836                 &done);
837   CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
838                 &done);
839 
840   // Record the actual write.
841   if (lr_status == kLRHasNotBeenSaved) {
842     push(lr);
843   }
844   CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
845   if (lr_status == kLRHasNotBeenSaved) {
846     pop(lr);
847   }
848 
849   bind(&done);
850 }
851 
852 void TurboAssembler::PushCommonFrame(Register marker_reg) {
853   if (marker_reg.is_valid()) {
854     if (marker_reg.code() > fp.code()) {
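      // stm stores registers in ascending register-code order, so a marker
      // register numbered above fp cannot be placed below fp by a single stm;
      // push it separately after fp has been set up.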
855       stm(db_w, sp, fp.bit() | lr.bit());
856       mov(fp, Operand(sp));
857       Push(marker_reg);
858     } else {
859       stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
860       add(fp, sp, Operand(kPointerSize));
861     }
862   } else {
863     stm(db_w, sp, fp.bit() | lr.bit());
864     mov(fp, sp);
865   }
866 }
867 
868 void TurboAssembler::PushStandardFrame(Register function_reg) {
869   DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
870   stm(db_w, sp,
871       (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
872           lr.bit());
873   int offset = -StandardFrameConstants::kContextOffset;
874   offset += function_reg.is_valid() ? kPointerSize : 0;
875   add(fp, sp, Operand(offset));
876   Push(kJavaScriptCallArgCountRegister);
877 }
878 
879 void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
880                                         const DwVfpRegister src,
881                                         const Condition cond) {
882   // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
883   // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
884   // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
885   vsub(dst, src, kDoubleRegZero, cond);
886 }
887 
888 void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
889                                            const SwVfpRegister src2,
890                                            const Condition cond) {
891   // Compare and move FPSCR flags to the normal condition flags.
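  // Passing pc as the flags register makes vmrs write the FPSCR N, Z, C and V
  // bits directly into the APSR condition flags.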
892   VFPCompareAndLoadFlags(src1, src2, pc, cond);
893 }
894 
895 void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
896                                            const float src2,
897                                            const Condition cond) {
898   // Compare and move FPSCR flags to the normal condition flags.
899   VFPCompareAndLoadFlags(src1, src2, pc, cond);
900 }
901 
902 void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
903                                            const DwVfpRegister src2,
904                                            const Condition cond) {
905   // Compare and move FPSCR flags to the normal condition flags.
906   VFPCompareAndLoadFlags(src1, src2, pc, cond);
907 }
908 
909 void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
910                                            const double src2,
911                                            const Condition cond) {
912   // Compare and move FPSCR flags to the normal condition flags.
913   VFPCompareAndLoadFlags(src1, src2, pc, cond);
914 }
915 
916 void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
917                                             const SwVfpRegister src2,
918                                             const Register fpscr_flags,
919                                             const Condition cond) {
920   // Compare and load FPSCR.
921   vcmp(src1, src2, cond);
922   vmrs(fpscr_flags, cond);
923 }
924 
925 void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
926                                             const float src2,
927                                             const Register fpscr_flags,
928                                             const Condition cond) {
929   // Compare and load FPSCR.
930   vcmp(src1, src2, cond);
931   vmrs(fpscr_flags, cond);
932 }
933 
934 void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
935                                             const DwVfpRegister src2,
936                                             const Register fpscr_flags,
937                                             const Condition cond) {
938   // Compare and load FPSCR.
939   vcmp(src1, src2, cond);
940   vmrs(fpscr_flags, cond);
941 }
942 
943 void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
944                                             const double src2,
945                                             const Register fpscr_flags,
946                                             const Condition cond) {
947   // Compare and load FPSCR.
948   vcmp(src1, src2, cond);
949   vmrs(fpscr_flags, cond);
950 }
951 
952 void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
953   if (src.code() < 16) {
954     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
955     vmov(dst, loc.high());
956   } else {
957     vmov(NeonS32, dst, src, 1);
958   }
959 }
960 
961 void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
962   if (dst.code() < 16) {
963     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
964     vmov(loc.high(), src);
965   } else {
966     vmov(NeonS32, dst, 1, src);
967   }
968 }
969 
970 void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
971   if (src.code() < 16) {
972     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
973     vmov(dst, loc.low());
974   } else {
975     vmov(NeonS32, dst, src, 0);
976   }
977 }
978 
979 void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
980   if (dst.code() < 16) {
981     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
982     vmov(loc.low(), src);
983   } else {
984     vmov(NeonS32, dst, 0, src);
985   }
986 }
987 
988 void TurboAssembler::VmovExtended(Register dst, int src_code) {
989   DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
990   DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
991   if (src_code & 0x1) {
992     VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
993   } else {
994     VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
995   }
996 }
997 
998 void TurboAssembler::VmovExtended(int dst_code, Register src) {
999   DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
1000   DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
1001   if (dst_code & 0x1) {
1002     VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
1003   } else {
1004     VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
1005   }
1006 }
1007 
1008 void TurboAssembler::VmovExtended(int dst_code, int src_code) {
1009   if (src_code == dst_code) return;
1010 
1011   if (src_code < SwVfpRegister::kNumRegisters &&
1012       dst_code < SwVfpRegister::kNumRegisters) {
1013     // src and dst are both s-registers.
1014     vmov(SwVfpRegister::from_code(dst_code),
1015          SwVfpRegister::from_code(src_code));
1016     return;
1017   }
1018   DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
1019   DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
1020   int dst_offset = dst_code & 1;
1021   int src_offset = src_code & 1;
1022   if (CpuFeatures::IsSupported(NEON)) {
1023     UseScratchRegisterScope temps(this);
1024     DwVfpRegister scratch = temps.AcquireD();
1025     // On Neon we can shift and insert from d-registers.
1026     if (src_offset == dst_offset) {
1027       // Offsets are the same, use vdup to copy the source to the opposite lane.
1028       vdup(Neon32, scratch, src_d_reg, src_offset);
1029       // Here we are extending the lifetime of scratch.
1030       src_d_reg = scratch;
1031       src_offset = dst_offset ^ 1;
1032     }
1033     if (dst_offset) {
1034       if (dst_d_reg == src_d_reg) {
1035         vdup(Neon32, dst_d_reg, src_d_reg, 0);
1036       } else {
1037         vsli(Neon64, dst_d_reg, src_d_reg, 32);
1038       }
1039     } else {
1040       if (dst_d_reg == src_d_reg) {
1041         vdup(Neon32, dst_d_reg, src_d_reg, 1);
1042       } else {
1043         vsri(Neon64, dst_d_reg, src_d_reg, 32);
1044       }
1045     }
1046     return;
1047   }
1048 
1049   // Without Neon, use the scratch registers to move src and/or dst into
1050   // s-registers.
1051   UseScratchRegisterScope temps(this);
1052   LowDwVfpRegister d_scratch = temps.AcquireLowD();
1053   LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
1054   int s_scratch_code = d_scratch.low().code();
1055   int s_scratch_code2 = d_scratch2.low().code();
1056   if (src_code < SwVfpRegister::kNumRegisters) {
1057     // src is an s-register, dst is not.
1058     vmov(d_scratch, dst_d_reg);
1059     vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
1060          SwVfpRegister::from_code(src_code));
1061     vmov(dst_d_reg, d_scratch);
1062   } else if (dst_code < SwVfpRegister::kNumRegisters) {
1063     // dst is an s-register, src is not.
1064     vmov(d_scratch, src_d_reg);
1065     vmov(SwVfpRegister::from_code(dst_code),
1066          SwVfpRegister::from_code(s_scratch_code + src_offset));
1067   } else {
1068     // Neither src nor dst is an s-register. Both scratch double registers are
1069     // available when there are 32 VFP registers.
1070     vmov(d_scratch, src_d_reg);
1071     vmov(d_scratch2, dst_d_reg);
1072     vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
1073          SwVfpRegister::from_code(s_scratch_code2 + src_offset));
1074     vmov(dst_d_reg, d_scratch2);
1075   }
1076 }
1077 
1078 void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
1079   if (dst_code < SwVfpRegister::kNumRegisters) {
1080     vldr(SwVfpRegister::from_code(dst_code), src);
1081   } else {
1082     UseScratchRegisterScope temps(this);
1083     LowDwVfpRegister scratch = temps.AcquireLowD();
1084     // TODO(bbudge) If Neon supported, use load single lane form of vld1.
1085     int dst_s_code = scratch.low().code() + (dst_code & 1);
1086     vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
1087     vldr(SwVfpRegister::from_code(dst_s_code), src);
1088     vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
1089   }
1090 }
1091 
1092 void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
1093   if (src_code < SwVfpRegister::kNumRegisters) {
1094     vstr(SwVfpRegister::from_code(src_code), dst);
1095   } else {
1096     // TODO(bbudge) If Neon supported, use store single lane form of vst1.
1097     UseScratchRegisterScope temps(this);
1098     LowDwVfpRegister scratch = temps.AcquireLowD();
1099     int src_s_code = scratch.low().code() + (src_code & 1);
1100     vmov(scratch, DwVfpRegister::from_code(src_code / 2));
1101     vstr(SwVfpRegister::from_code(src_s_code), dst);
1102   }
1103 }
1104 
1105 void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
1106                                  NeonDataType dt, int lane) {
1107   int size = NeonSz(dt);  // 0, 1, 2
1108   int byte = lane << size;
1109   int double_word = byte >> kDoubleSizeLog2;
1110   int double_byte = byte & (kDoubleSize - 1);
1111   int double_lane = double_byte >> size;
1112   DwVfpRegister double_source =
1113       DwVfpRegister::from_code(src.code() * 2 + double_word);
1114   vmov(dt, dst, double_source, double_lane);
1115 }
1116 
1117 void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
1118                                  NeonDataType dt, int lane) {
1119   int size = NeonSz(dt);  // 0, 1, 2
1120   int byte = lane << size;
1121   int double_byte = byte & (kDoubleSize - 1);
1122   int double_lane = double_byte >> size;
1123   vmov(dt, dst, src, double_lane);
1124 }
1125 
1126 void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
1127                                  int lane) {
1128   int s_code = src.code() * 4 + lane;
1129   VmovExtended(dst.code(), s_code);
1130 }
1131 
1132 void TurboAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src,
1133                                  int lane) {
1134   DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane);
1135   vmov(dst, double_dst);
1136 }
1137 
1138 void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1139                                  Register src_lane, NeonDataType dt, int lane) {
1140   Move(dst, src);
1141   int size = NeonSz(dt);  // 0, 1, 2
1142   int byte = lane << size;
1143   int double_word = byte >> kDoubleSizeLog2;
1144   int double_byte = byte & (kDoubleSize - 1);
1145   int double_lane = double_byte >> size;
1146   DwVfpRegister double_dst =
1147       DwVfpRegister::from_code(dst.code() * 2 + double_word);
1148   vmov(dt, double_dst, double_lane, src_lane);
1149 }
1150 
1151 void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1152                                  SwVfpRegister src_lane, int lane) {
1153   Move(dst, src);
1154   int s_code = dst.code() * 4 + lane;
1155   VmovExtended(s_code, src_lane.code());
1156 }
1157 
1158 void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1159                                  DwVfpRegister src_lane, int lane) {
1160   Move(dst, src);
1161   DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane);
1162   vmov(double_dst, src_lane);
1163 }
1164 
1165 void TurboAssembler::LslPair(Register dst_low, Register dst_high,
1166                              Register src_low, Register src_high,
1167                              Register shift) {
1168   DCHECK(!AreAliased(dst_high, src_low));
1169   DCHECK(!AreAliased(dst_high, shift));
1170   UseScratchRegisterScope temps(this);
1171   Register scratch = temps.Acquire();
1172 
1173   Label less_than_32;
1174   Label done;
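  // Compute scratch = 32 - shift and set the flags: a positive result (gt)
  // means shift < 32; otherwise the low source word is shifted entirely into
  // the high half of the result.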
1175   rsb(scratch, shift, Operand(32), SetCC);
1176   b(gt, &less_than_32);
1177   // If shift >= 32
1178   and_(scratch, shift, Operand(0x1F));
1179   lsl(dst_high, src_low, Operand(scratch));
1180   mov(dst_low, Operand(0));
1181   jmp(&done);
1182   bind(&less_than_32);
1183   // If shift < 32
1184   lsl(dst_high, src_high, Operand(shift));
1185   orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
1186   lsl(dst_low, src_low, Operand(shift));
1187   bind(&done);
1188 }
1189 
1190 void TurboAssembler::LslPair(Register dst_low, Register dst_high,
1191                              Register src_low, Register src_high,
1192                              uint32_t shift) {
1193   DCHECK_GE(63, shift);
1194   DCHECK(!AreAliased(dst_high, src_low));
1195 
1196   if (shift == 0) {
1197     Move(dst_high, src_high);
1198     Move(dst_low, src_low);
1199   } else if (shift == 32) {
1200     Move(dst_high, src_low);
1201     Move(dst_low, Operand(0));
1202   } else if (shift >= 32) {
1203     shift &= 0x1F;
1204     lsl(dst_high, src_low, Operand(shift));
1205     mov(dst_low, Operand(0));
1206   } else {
1207     lsl(dst_high, src_high, Operand(shift));
1208     orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
1209     lsl(dst_low, src_low, Operand(shift));
1210   }
1211 }
1212 
1213 void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
1214                              Register src_low, Register src_high,
1215                              Register shift) {
1216   DCHECK(!AreAliased(dst_low, src_high));
1217   DCHECK(!AreAliased(dst_low, shift));
1218   UseScratchRegisterScope temps(this);
1219   Register scratch = temps.Acquire();
1220 
1221   Label less_than_32;
1222   Label done;
1223   rsb(scratch, shift, Operand(32), SetCC);
1224   b(gt, &less_than_32);
1225   // If shift >= 32
1226   and_(scratch, shift, Operand(0x1F));
1227   lsr(dst_low, src_high, Operand(scratch));
1228   mov(dst_high, Operand(0));
1229   jmp(&done);
1230   bind(&less_than_32);
1231   // If shift < 32
1232 
1233   lsr(dst_low, src_low, Operand(shift));
1234   orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1235   lsr(dst_high, src_high, Operand(shift));
1236   bind(&done);
1237 }
1238 
1239 void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
1240                              Register src_low, Register src_high,
1241                              uint32_t shift) {
1242   DCHECK_GE(63, shift);
1243   DCHECK(!AreAliased(dst_low, src_high));
1244 
1245   if (shift == 32) {
1246     mov(dst_low, src_high);
1247     mov(dst_high, Operand(0));
1248   } else if (shift > 32) {
1249     shift &= 0x1F;
1250     lsr(dst_low, src_high, Operand(shift));
1251     mov(dst_high, Operand(0));
1252   } else if (shift == 0) {
1253     Move(dst_low, src_low);
1254     Move(dst_high, src_high);
1255   } else {
1256     lsr(dst_low, src_low, Operand(shift));
1257     orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1258     lsr(dst_high, src_high, Operand(shift));
1259   }
1260 }
1261 
1262 void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
1263                              Register src_low, Register src_high,
1264                              Register shift) {
1265   DCHECK(!AreAliased(dst_low, src_high));
1266   DCHECK(!AreAliased(dst_low, shift));
1267   UseScratchRegisterScope temps(this);
1268   Register scratch = temps.Acquire();
1269 
1270   Label less_than_32;
1271   Label done;
1272   rsb(scratch, shift, Operand(32), SetCC);
1273   b(gt, &less_than_32);
1274   // If shift >= 32
1275   and_(scratch, shift, Operand(0x1F));
1276   asr(dst_low, src_high, Operand(scratch));
1277   asr(dst_high, src_high, Operand(31));
1278   jmp(&done);
1279   bind(&less_than_32);
1280   // If shift < 32
1281   lsr(dst_low, src_low, Operand(shift));
1282   orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1283   asr(dst_high, src_high, Operand(shift));
1284   bind(&done);
1285 }
1286 
1287 void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
1288                              Register src_low, Register src_high,
1289                              uint32_t shift) {
1290   DCHECK_GE(63, shift);
1291   DCHECK(!AreAliased(dst_low, src_high));
1292 
1293   if (shift == 32) {
1294     mov(dst_low, src_high);
1295     asr(dst_high, src_high, Operand(31));
1296   } else if (shift > 32) {
1297     shift &= 0x1F;
1298     asr(dst_low, src_high, Operand(shift));
1299     asr(dst_high, src_high, Operand(31));
1300   } else if (shift == 0) {
1301     Move(dst_low, src_low);
1302     Move(dst_high, src_high);
1303   } else {
1304     lsr(dst_low, src_low, Operand(shift));
1305     orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1306     asr(dst_high, src_high, Operand(shift));
1307   }
1308 }
1309 
1310 void TurboAssembler::StubPrologue(StackFrame::Type type) {
1311   UseScratchRegisterScope temps(this);
1312   Register scratch = temps.Acquire();
1313   mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1314   PushCommonFrame(scratch);
1315 }
1316 
Prologue()1317 void TurboAssembler::Prologue() { PushStandardFrame(r1); }
1318 
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)1319 void TurboAssembler::EnterFrame(StackFrame::Type type,
1320                                 bool load_constant_pool_pointer_reg) {
1321   // r0-r3: preserved
1322   UseScratchRegisterScope temps(this);
1323   Register scratch = temps.Acquire();
1324   mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1325   PushCommonFrame(scratch);
1326 }
1327 
LeaveFrame(StackFrame::Type type)1328 int TurboAssembler::LeaveFrame(StackFrame::Type type) {
1329   // r0: preserved
1330   // r1: preserved
1331   // r2: preserved
1332 
1333   // Drop the execution stack down to the frame pointer and restore
1334   // the caller frame pointer and return address.
1335   mov(sp, fp);
1336   int frame_ends = pc_offset();
1337   ldm(ia_w, sp, fp.bit() | lr.bit());
1338   return frame_ends;
1339 }
1340 
1341 #ifdef V8_OS_WIN
AllocateStackSpace(Register bytes_scratch)1342 void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
1343   // "Functions that allocate 4 KB or more on the stack must ensure that each
1344   // page prior to the final page is touched in order." Source:
1345   // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
1346   UseScratchRegisterScope temps(this);
1347   DwVfpRegister scratch = temps.AcquireD();
1348   Label check_offset;
1349   Label touch_next_page;
1350   jmp(&check_offset);
1351   bind(&touch_next_page);
1352   sub(sp, sp, Operand(kStackPageSize));
1353   // Just touch the page before we move sp further down.
1354   vldr(scratch, MemOperand(sp));
1355   sub(bytes_scratch, bytes_scratch, Operand(kStackPageSize));
1356 
1357   bind(&check_offset);
1358   cmp(bytes_scratch, Operand(kStackPageSize));
1359   b(gt, &touch_next_page);
1360 
1361   sub(sp, sp, bytes_scratch);
1362 }
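
// The loop above reserves the space one page at a time and does a dummy vldr
// from the new stack top so that every page is actually touched, as the
// Windows ARM ABI quoted above requires; the final sub allocates the remaining
// partial page.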
1363 
AllocateStackSpace(int bytes)1364 void TurboAssembler::AllocateStackSpace(int bytes) {
1365   UseScratchRegisterScope temps(this);
1366   DwVfpRegister scratch = no_dreg;
1367   while (bytes > kStackPageSize) {
1368     if (scratch == no_dreg) {
1369       scratch = temps.AcquireD();
1370     }
1371     sub(sp, sp, Operand(kStackPageSize));
1372     vldr(scratch, MemOperand(sp));
1373     bytes -= kStackPageSize;
1374   }
1375   sub(sp, sp, Operand(bytes));
1376 }
1377 #endif
1378 
EnterExitFrame(bool save_doubles,int stack_space,StackFrame::Type frame_type)1379 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1380                                     StackFrame::Type frame_type) {
1381   DCHECK(frame_type == StackFrame::EXIT ||
1382          frame_type == StackFrame::BUILTIN_EXIT);
1383   UseScratchRegisterScope temps(this);
1384   Register scratch = temps.Acquire();
1385 
1386   // Set up the frame structure on the stack.
1387   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1388   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1389   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1390   mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
1391   PushCommonFrame(scratch);
1392   // Reserve room for saved entry sp.
1393   sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1394   if (emit_debug_code()) {
1395     mov(scratch, Operand::Zero());
1396     str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1397   }
1398 
1399   // Save the frame pointer and the context in top.
1400   Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1401                                           isolate()));
1402   str(fp, MemOperand(scratch));
1403   Move(scratch,
1404        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1405   str(cp, MemOperand(scratch));
1406 
1407   // Optionally save all double registers.
1408   if (save_doubles) {
1409     SaveFPRegs(sp, scratch);
1410     // Note that d0 will be accessible at
1411     //   fp - ExitFrameConstants::kFrameSize -
1412     //   DwVfpRegister::kNumRegisters * kDoubleSize,
1413     // since the sp slot and code slot were pushed after the fp.
1414   }
1415 
1416   // Reserve room for the return address and the requested stack space, and
1417   // align the frame in preparation for calling the runtime function.
1418   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1419   AllocateStackSpace((stack_space + 1) * kPointerSize);
1420   if (frame_alignment > 0) {
1421     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1422     and_(sp, sp, Operand(-frame_alignment));
1423   }
1424 
1425   // Set the exit frame sp value to point just before the return address
1426   // location.
1427   add(scratch, sp, Operand(kPointerSize));
1428   str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1429 }
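
// Roughly, the frame now looks like this: the saved lr at fp + 4 and the saved
// fp at fp + 0 (see the DCHECKs above), the frame marker and the other fixed
// slots below fp down to kFixedFrameSizeFromFp (including the saved-sp slot at
// kSPOffset), optionally the saved double registers, and finally stack_space +
// 1 words, with sp aligned down to the activation frame alignment.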
1430 
ActivationFrameAlignment()1431 int TurboAssembler::ActivationFrameAlignment() {
1432 #if V8_HOST_ARCH_ARM
1433   // Running on the real platform. Use the alignment as mandated by the local
1434   // environment.
1435   // Note: This will break if we ever start generating snapshots on one ARM
1436   // platform for another ARM platform with a different alignment.
1437   return base::OS::ActivationFrameAlignment();
1438 #else   // V8_HOST_ARCH_ARM
1439   // If we are using the simulator then we should always align to the expected
1440   // alignment. As the simulator is used to generate snapshots we do not know
1441   // if the target platform will need alignment, so this is controlled from a
1442   // flag.
1443   return FLAG_sim_stack_alignment;
1444 #endif  // V8_HOST_ARCH_ARM
1445 }
1446 
LeaveExitFrame(bool save_doubles,Register argument_count,bool argument_count_is_length)1447 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1448                                     bool argument_count_is_length) {
1449   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1450   UseScratchRegisterScope temps(this);
1451   Register scratch = temps.Acquire();
1452 
1453   // Optionally restore all double registers.
1454   if (save_doubles) {
1455     // Calculate the stack location of the saved doubles and restore them.
1456     const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
1457     sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
1458     RestoreFPRegs(r3, scratch);
1459   }
1460 
1461   // Clear top frame.
1462   mov(r3, Operand::Zero());
1463   Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1464                                           isolate()));
1465   str(r3, MemOperand(scratch));
1466 
1467   // Restore current context from top and clear it in debug mode.
1468   Move(scratch,
1469        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1470   ldr(cp, MemOperand(scratch));
1471 #ifdef DEBUG
1472   mov(r3, Operand(Context::kInvalidContext));
1473   Move(scratch,
1474        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1475   str(r3, MemOperand(scratch));
1476 #endif
1477 
1478   // Tear down the exit frame, pop the arguments, and return.
1479   mov(sp, Operand(fp));
1480   ldm(ia_w, sp, fp.bit() | lr.bit());
1481   if (argument_count.is_valid()) {
1482     if (argument_count_is_length) {
1483       add(sp, sp, argument_count);
1484     } else {
1485       add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1486     }
1487   }
1488 }
1489 
MovFromFloatResult(const DwVfpRegister dst)1490 void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1491   if (use_eabi_hardfloat()) {
1492     Move(dst, d0);
1493   } else {
1494     vmov(dst, r0, r1);
1495   }
1496 }
1497 
1498 // On ARM this is just a synonym to make the purpose clear.
MovFromFloatParameter(DwVfpRegister dst)1499 void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1500   MovFromFloatResult(dst);
1501 }
1502 
PrepareForTailCall(Register callee_args_count,Register caller_args_count,Register scratch0,Register scratch1)1503 void TurboAssembler::PrepareForTailCall(Register callee_args_count,
1504                                         Register caller_args_count,
1505                                         Register scratch0, Register scratch1) {
1506   DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
1507 
1508   // Calculate the end of the destination area where we will put the
1509   // arguments after we drop the current frame. We add kPointerSize to count
1510   // the receiver argument, which is not included in the formal parameter count.
1511   Register dst_reg = scratch0;
1512   add(dst_reg, fp, Operand(caller_args_count, LSL, kPointerSizeLog2));
1513   add(dst_reg, dst_reg,
1514       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1515 
1516   Register src_reg = caller_args_count;
1517   // Calculate the end of source area. +kPointerSize is for the receiver.
1518   add(src_reg, sp, Operand(callee_args_count, LSL, kPointerSizeLog2));
1519   add(src_reg, src_reg, Operand(kPointerSize));
1520 
1521   if (FLAG_debug_code) {
1522     cmp(src_reg, dst_reg);
1523     Check(lo, AbortReason::kStackAccessBelowStackPointer);
1524   }
1525 
1526   // Restore caller's frame pointer and return address now as they will be
1527   // overwritten by the copying loop.
1528   ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1529   ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1530 
1531   // Now copy the callee arguments to the caller frame, going backwards to
1532   // avoid corrupting them (the source and destination areas could overlap).
1533 
1534   // Both src_reg and dst_reg are pointing to the word after the one to copy,
1535   // so they must be pre-decremented in the loop.
1536   Register tmp_reg = scratch1;
1537   Label loop, entry;
1538   b(&entry);
1539   bind(&loop);
1540   ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
1541   str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
1542   bind(&entry);
1543   cmp(sp, src_reg);
1544   b(ne, &loop);
1545 
1546   // Leave current frame.
1547   mov(sp, dst_reg);
1548 }
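
// In short: dst_reg points one word past the caller's argument area and
// src_reg one word past the callee's arguments on the current stack. The loop
// copies words downwards, pre-decrementing both pointers, until src_reg
// reaches sp; the final mov then drops the current frame so that the callee's
// arguments end up where the caller's arguments used to be.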
1549 
LoadStackLimit(Register destination,StackLimitKind kind)1550 void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
1551   DCHECK(root_array_available());
1552   Isolate* isolate = this->isolate();
1553   ExternalReference limit =
1554       kind == StackLimitKind::kRealStackLimit
1555           ? ExternalReference::address_of_real_jslimit(isolate)
1556           : ExternalReference::address_of_jslimit(isolate);
1557   DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
1558 
1559   intptr_t offset =
1560       TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
1561   CHECK(is_int32(offset));
1562   ldr(destination, MemOperand(kRootRegister, offset));
1563 }
1564 
StackOverflowCheck(Register num_args,Register scratch,Label * stack_overflow)1565 void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1566                                         Label* stack_overflow) {
1567   // Check the stack for overflow. We are not trying to catch
1568   // interruptions (e.g. debug break and preemption) here, so the "real stack
1569   // limit" is checked.
1570   LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
1571   // Make scratch the space we have left. The stack might already have
1572   // overflowed here, which will cause scratch to become negative.
1573   sub(scratch, sp, scratch);
1574   // Check if the arguments will overflow the stack.
1575   cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
1576   b(le, stack_overflow);  // Signed comparison.
1577 }
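
// In other words, scratch = sp - real_jslimit is the number of bytes left
// before the real limit. If num_args * kPointerSize is at least that much
// (signed "le", which also covers an already-negative scratch), we branch to
// the overflow path.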
1578 
InvokePrologue(Register expected_parameter_count,Register actual_parameter_count,Label * done,InvokeFlag flag)1579 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1580                                     Register actual_parameter_count,
1581                                     Label* done, InvokeFlag flag) {
1582   Label regular_invoke;
1583   //  r0: actual arguments count
1584   //  r1: function (passed through to callee)
1585   //  r2: expected arguments count
1586   DCHECK_EQ(actual_parameter_count, r0);
1587   DCHECK_EQ(expected_parameter_count, r2);
1588 
1589 #ifdef V8_NO_ARGUMENTS_ADAPTOR
1590   // If the expected parameter count is equal to the adaptor sentinel, there is
1591   // no need to push undefined values as arguments.
1592   cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
1593   b(eq, &regular_invoke);
1594 
1595   // If overapplication or if the actual argument count is equal to the
1596   // formal parameter count, no need to push extra undefined values.
1597   sub(expected_parameter_count, expected_parameter_count,
1598       actual_parameter_count, SetCC);
1599   b(le, &regular_invoke);
1600 
1601   Label stack_overflow;
1602   Register scratch = r4;
1603   StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1604 
1605   // Underapplication. Move the arguments already on the stack, including the
1606   // receiver and the return address.
1607   {
1608     Label copy, check;
1609     Register num = r5, src = r6, dest = r9;  // r7 and r8 are context and root.
1610     mov(src, sp);
1611     // Update stack pointer.
1612     lsl(scratch, expected_parameter_count, Operand(kSystemPointerSizeLog2));
1613     AllocateStackSpace(scratch);
1614     mov(dest, sp);
1615     mov(num, actual_parameter_count);
1616     b(&check);
1617     bind(&copy);
1618     ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
1619     str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
1620     sub(num, num, Operand(1), SetCC);
1621     bind(&check);
1622     b(ge, &copy);
1623   }
1624 
1625   // Fill remaining expected arguments with undefined values.
1626   LoadRoot(scratch, RootIndex::kUndefinedValue);
1627   {
1628     Label loop;
1629     bind(&loop);
1630     str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
1631     sub(expected_parameter_count, expected_parameter_count, Operand(1), SetCC);
1632     b(gt, &loop);
1633   }
1634   b(&regular_invoke);
1635 
1636   bind(&stack_overflow);
1637   {
1638     FrameScope frame(this,
1639                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1640     CallRuntime(Runtime::kThrowStackOverflow);
1641     bkpt(0);
1642   }
1643 #else
1644   // Check whether the expected and actual argument counts match. If not, set
1645   // up the registers according to the contract with ArgumentsAdaptorTrampoline.
1646   cmp(expected_parameter_count, actual_parameter_count);
1647   b(eq, &regular_invoke);
1648 
1649   Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1650   if (flag == CALL_FUNCTION) {
1651     Call(adaptor);
1652     b(done);
1653   } else {
1654     Jump(adaptor, RelocInfo::CODE_TARGET);
1655   }
1656 #endif
1657   bind(&regular_invoke);
1658 }
1659 
CallDebugOnFunctionCall(Register fun,Register new_target,Register expected_parameter_count,Register actual_parameter_count)1660 void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
1661                                              Register expected_parameter_count,
1662                                              Register actual_parameter_count) {
1663   // Load receiver to pass it later to DebugOnFunctionCall hook.
1664   ldr(r4, ReceiverOperand(actual_parameter_count));
1665   FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1666 
1667   SmiTag(expected_parameter_count);
1668   Push(expected_parameter_count);
1669 
1670   SmiTag(actual_parameter_count);
1671   Push(actual_parameter_count);
1672 
1673   if (new_target.is_valid()) {
1674     Push(new_target);
1675   }
1676   Push(fun);
1677   Push(fun);
1678   Push(r4);
1679   CallRuntime(Runtime::kDebugOnFunctionCall);
1680   Pop(fun);
1681   if (new_target.is_valid()) {
1682     Pop(new_target);
1683   }
1684 
1685   Pop(actual_parameter_count);
1686   SmiUntag(actual_parameter_count);
1687 
1688   Pop(expected_parameter_count);
1689   SmiUntag(expected_parameter_count);
1690 }
1691 
InvokeFunctionCode(Register function,Register new_target,Register expected_parameter_count,Register actual_parameter_count,InvokeFlag flag)1692 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1693                                         Register expected_parameter_count,
1694                                         Register actual_parameter_count,
1695                                         InvokeFlag flag) {
1696   // You can't call a function without a valid frame.
1697   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1698   DCHECK_EQ(function, r1);
1699   DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
1700 
1701   // On function call, call into the debugger if necessary.
1702   Label debug_hook, continue_after_hook;
1703   {
1704     ExternalReference debug_hook_active =
1705         ExternalReference::debug_hook_on_function_call_address(isolate());
1706     Move(r4, debug_hook_active);
1707     ldrsb(r4, MemOperand(r4));
1708     cmp(r4, Operand(0));
1709     b(ne, &debug_hook);
1710   }
1711   bind(&continue_after_hook);
1712 
1713   // Clear the new.target register if not given.
1714   if (!new_target.is_valid()) {
1715     LoadRoot(r3, RootIndex::kUndefinedValue);
1716   }
1717 
1718   Label done;
1719   InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
1720   // We call indirectly through the code field in the function to
1721   // allow recompilation to take effect without changing any of the
1722   // call sites.
1723   Register code = kJavaScriptCallCodeStartRegister;
1724   ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1725   if (flag == CALL_FUNCTION) {
1726     CallCodeObject(code);
1727   } else {
1728     DCHECK(flag == JUMP_FUNCTION);
1729     JumpCodeObject(code);
1730   }
1731   b(&done);
1732 
1733   // Deferred debug hook.
1734   bind(&debug_hook);
1735   CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
1736                           actual_parameter_count);
1737   b(&continue_after_hook);
1738 
1739   // Continue here if InvokePrologue handled the invocation itself because of
1740   // mismatched parameter counts.
1741   bind(&done);
1742 }
1743 
InvokeFunctionWithNewTarget(Register fun,Register new_target,Register actual_parameter_count,InvokeFlag flag)1744 void MacroAssembler::InvokeFunctionWithNewTarget(
1745     Register fun, Register new_target, Register actual_parameter_count,
1746     InvokeFlag flag) {
1747   // You can't call a function without a valid frame.
1748   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1749 
1750   // Contract with called JS functions requires that function is passed in r1.
1751   DCHECK_EQ(fun, r1);
1752 
1753   Register expected_reg = r2;
1754   Register temp_reg = r4;
1755 
1756   ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1757   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1758   ldrh(expected_reg,
1759        FieldMemOperand(temp_reg,
1760                        SharedFunctionInfo::kFormalParameterCountOffset));
1761 
1762   InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1763                      flag);
1764 }
1765 
InvokeFunction(Register function,Register expected_parameter_count,Register actual_parameter_count,InvokeFlag flag)1766 void MacroAssembler::InvokeFunction(Register function,
1767                                     Register expected_parameter_count,
1768                                     Register actual_parameter_count,
1769                                     InvokeFlag flag) {
1770   // You can't call a function without a valid frame.
1771   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1772 
1773   // Contract with called JS functions requires that function is passed in r1.
1774   DCHECK_EQ(function, r1);
1775 
1776   // Get the function and setup the context.
1777   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1778 
1779   InvokeFunctionCode(r1, no_reg, expected_parameter_count,
1780                      actual_parameter_count, flag);
1781 }
1782 
MaybeDropFrames()1783 void MacroAssembler::MaybeDropFrames() {
1784   // Check whether we need to drop frames to restart a function on the stack.
1785   ExternalReference restart_fp =
1786       ExternalReference::debug_restart_fp_address(isolate());
1787   Move(r1, restart_fp);
1788   ldr(r1, MemOperand(r1));
1789   tst(r1, r1);
1790   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1791        ne);
1792 }
1793 
PushStackHandler()1794 void MacroAssembler::PushStackHandler() {
1795   // Adjust this code if not the case.
1796   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1797   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1798 
1799   Push(Smi::zero());  // Padding.
1800   // Link the current handler as the next handler.
1801   Move(r6,
1802        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1803   ldr(r5, MemOperand(r6));
1804   push(r5);
1805   // Set this new handler as the current one.
1806   str(sp, MemOperand(r6));
1807 }
1808 
PopStackHandler()1809 void MacroAssembler::PopStackHandler() {
1810   UseScratchRegisterScope temps(this);
1811   Register scratch = temps.Acquire();
1812   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1813   pop(r1);
1814   Move(scratch,
1815        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1816   str(r1, MemOperand(scratch));
1817   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1818 }
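
// A stack handler is two words: the padding word pushed first and the "next"
// pointer at offset 0 (see the STATIC_ASSERTs above). Push/PopStackHandler
// link and unlink the handler through the isolate's kHandlerAddress slot.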
1819 
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)1820 void MacroAssembler::CompareObjectType(Register object, Register map,
1821                                        Register type_reg, InstanceType type) {
1822   UseScratchRegisterScope temps(this);
1823   const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
1824 
1825   LoadMap(map, object);
1826   CompareInstanceType(map, temp, type);
1827 }
1828 
CompareInstanceType(Register map,Register type_reg,InstanceType type)1829 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1830                                          InstanceType type) {
1831   ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1832   cmp(type_reg, Operand(type));
1833 }
1834 
CompareRoot(Register obj,RootIndex index)1835 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1836   UseScratchRegisterScope temps(this);
1837   Register scratch = temps.Acquire();
1838   DCHECK(obj != scratch);
1839   LoadRoot(scratch, index);
1840   cmp(obj, scratch);
1841 }
1842 
JumpIfIsInRange(Register value,unsigned lower_limit,unsigned higher_limit,Label * on_in_range)1843 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1844                                      unsigned higher_limit,
1845                                      Label* on_in_range) {
1846   if (lower_limit != 0) {
1847     UseScratchRegisterScope temps(this);
1848     Register scratch = temps.Acquire();
1849     sub(scratch, value, Operand(lower_limit));
1850     cmp(scratch, Operand(higher_limit - lower_limit));
1851   } else {
1852     cmp(value, Operand(higher_limit));
1853   }
1854   b(ls, on_in_range);
1855 }
1856 
TryInlineTruncateDoubleToI(Register result,DwVfpRegister double_input,Label * done)1857 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1858                                                 DwVfpRegister double_input,
1859                                                 Label* done) {
1860   UseScratchRegisterScope temps(this);
1861   SwVfpRegister single_scratch = SwVfpRegister::no_reg();
1862   if (temps.CanAcquireVfp<SwVfpRegister>()) {
1863     single_scratch = temps.AcquireS();
1864   } else {
1865     // Re-use the input as a scratch register. However, we can only do this if
1866     // the input register is d0-d15 as there are no s32+ registers.
1867     DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
1868     LowDwVfpRegister double_scratch =
1869         LowDwVfpRegister::from_code(double_input.code());
1870     single_scratch = double_scratch.low();
1871   }
1872   vcvt_s32_f64(single_scratch, double_input);
1873   vmov(result, single_scratch);
1874 
1875   Register scratch = temps.Acquire();
1876   // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
1877   sub(scratch, result, Operand(1));
1878   cmp(scratch, Operand(0x7FFFFFFE));
1879   b(lt, done);
1880 }
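
// The saturation check works because vcvt_s32_f64 clamps out-of-range inputs
// to 0x7FFFFFFF or 0x80000000. For exactly those two values result - 1 is
// 0x7FFFFFFE or 0x7FFFFFFF, i.e. not signed-less-than 0x7FFFFFFE, so only
// unsaturated results take the early exit to done.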
1881 
TruncateDoubleToI(Isolate * isolate,Zone * zone,Register result,DwVfpRegister double_input,StubCallMode stub_mode)1882 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1883                                        Register result,
1884                                        DwVfpRegister double_input,
1885                                        StubCallMode stub_mode) {
1886   Label done;
1887 
1888   TryInlineTruncateDoubleToI(result, double_input, &done);
1889 
1890   // If we fell through, the inline version didn't succeed, so call the stub instead.
1891   push(lr);
1892   AllocateStackSpace(kDoubleSize);  // Put input on stack.
1893   vstr(double_input, MemOperand(sp, 0));
1894 
1895   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1896     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1897   } else if (options().inline_offheap_trampolines) {
1898     CallBuiltin(Builtins::kDoubleToI);
1899   } else {
1900     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1901   }
1902   ldr(result, MemOperand(sp, 0));
1903 
1904   add(sp, sp, Operand(kDoubleSize));
1905   pop(lr);
1906 
1907   bind(&done);
1908 }
1909 
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)1910 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1911                                  SaveFPRegsMode save_doubles) {
1912   // All parameters are on the stack.  r0 has the return value after call.
1913 
1914   // If the expected number of arguments of the runtime function is
1915   // constant, we check that the actual number of arguments match the
1916   // expectation.
1917   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1918 
1919   // TODO(1236192): Most runtime routines don't need the number of
1920   // arguments passed in because it is constant. At some point we
1921   // should remove this need and make the runtime routine entry code
1922   // smarter.
1923   mov(r0, Operand(num_arguments));
1924   Move(r1, ExternalReference::Create(f));
1925   Handle<Code> code =
1926       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1927   Call(code, RelocInfo::CODE_TARGET);
1928 }
1929 
TailCallRuntime(Runtime::FunctionId fid)1930 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1931   const Runtime::Function* function = Runtime::FunctionForId(fid);
1932   DCHECK_EQ(1, function->result_size);
1933   if (function->nargs >= 0) {
1934     // TODO(1236192): Most runtime routines don't need the number of
1935     // arguments passed in because it is constant. At some point we
1936     // should remove this need and make the runtime routine entry code
1937     // smarter.
1938     mov(r0, Operand(function->nargs));
1939   }
1940   JumpToExternalReference(ExternalReference::Create(fid));
1941 }
1942 
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)1943 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1944                                              bool builtin_exit_frame) {
1945 #if defined(__thumb__)
1946   // Thumb mode builtin.
1947   DCHECK_EQ(builtin.address() & 1, 1);
1948 #endif
1949   Move(r1, builtin);
1950   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1951                                           kArgvOnStack, builtin_exit_frame);
1952   Jump(code, RelocInfo::CODE_TARGET);
1953 }
1954 
JumpToInstructionStream(Address entry)1955 void MacroAssembler::JumpToInstructionStream(Address entry) {
1956   mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1957   Jump(kOffHeapTrampolineRegister);
1958 }
1959 
LoadWeakValue(Register out,Register in,Label * target_if_cleared)1960 void MacroAssembler::LoadWeakValue(Register out, Register in,
1961                                    Label* target_if_cleared) {
1962   cmp(in, Operand(kClearedWeakHeapObjectLower32));
1963   b(eq, target_if_cleared);
1964 
1965   and_(out, in, Operand(~kWeakHeapObjectMask));
1966 }
1967 
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)1968 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1969                                       Register scratch1, Register scratch2) {
1970   DCHECK_GT(value, 0);
1971   if (FLAG_native_code_counters && counter->Enabled()) {
1972     Move(scratch2, ExternalReference::Create(counter));
1973     ldr(scratch1, MemOperand(scratch2));
1974     add(scratch1, scratch1, Operand(value));
1975     str(scratch1, MemOperand(scratch2));
1976   }
1977 }
1978 
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)1979 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1980                                       Register scratch1, Register scratch2) {
1981   DCHECK_GT(value, 0);
1982   if (FLAG_native_code_counters && counter->Enabled()) {
1983     Move(scratch2, ExternalReference::Create(counter));
1984     ldr(scratch1, MemOperand(scratch2));
1985     sub(scratch1, scratch1, Operand(value));
1986     str(scratch1, MemOperand(scratch2));
1987   }
1988 }
1989 
Assert(Condition cond,AbortReason reason)1990 void TurboAssembler::Assert(Condition cond, AbortReason reason) {
1991   if (emit_debug_code()) Check(cond, reason);
1992 }
1993 
AssertUnreachable(AbortReason reason)1994 void TurboAssembler::AssertUnreachable(AbortReason reason) {
1995   if (emit_debug_code()) Abort(reason);
1996 }
1997 
Check(Condition cond,AbortReason reason)1998 void TurboAssembler::Check(Condition cond, AbortReason reason) {
1999   Label L;
2000   b(cond, &L);
2001   Abort(reason);
2002   // will not return here
2003   bind(&L);
2004 }
2005 
Abort(AbortReason reason)2006 void TurboAssembler::Abort(AbortReason reason) {
2007   Label abort_start;
2008   bind(&abort_start);
2009 #ifdef DEBUG
2010   const char* msg = GetAbortReason(reason);
2011   RecordComment("Abort message: ");
2012   RecordComment(msg);
2013 #endif
2014 
2015   // Avoid emitting call to builtin if requested.
2016   if (trap_on_abort()) {
2017     stop();
2018     return;
2019   }
2020 
2021   if (should_abort_hard()) {
2022     // We don't care if we constructed a frame. Just pretend we did.
2023     FrameScope assume_frame(this, StackFrame::NONE);
2024     Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
2025     PrepareCallCFunction(1, 0, r1);
2026     Move(r1, ExternalReference::abort_with_reason());
2027     // Use Call directly to avoid any unneeded overhead. The function won't
2028     // return anyway.
2029     Call(r1);
2030     return;
2031   }
2032 
2033   Move(r1, Smi::FromInt(static_cast<int>(reason)));
2034 
2035   // Disable stub call restrictions to always allow calls to abort.
2036   if (!has_frame()) {
2037     // We don't actually want to generate a pile of code for this, so just
2038     // claim there is a stack frame, without generating one.
2039     FrameScope scope(this, StackFrame::NONE);
2040     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2041   } else {
2042     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2043   }
2044   // will not return here
2045 }
2046 
LoadMap(Register destination,Register object)2047 void MacroAssembler::LoadMap(Register destination, Register object) {
2048   ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset));
2049 }
2050 
LoadGlobalProxy(Register dst)2051 void MacroAssembler::LoadGlobalProxy(Register dst) {
2052   LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
2053 }
2054 
LoadNativeContextSlot(int index,Register dst)2055 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2056   LoadMap(dst, cp);
2057   ldr(dst, FieldMemOperand(
2058                dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2059   ldr(dst, MemOperand(dst, Context::SlotOffset(index)));
2060 }
2061 
InitializeRootRegister()2062 void TurboAssembler::InitializeRootRegister() {
2063   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
2064   mov(kRootRegister, Operand(isolate_root));
2065 }
2066 
SmiTag(Register reg,SBit s)2067 void MacroAssembler::SmiTag(Register reg, SBit s) {
2068   add(reg, reg, Operand(reg), s);
2069 }
2070 
SmiTag(Register dst,Register src,SBit s)2071 void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
2072   add(dst, src, Operand(src), s);
2073 }
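
// On 32-bit ARM a Smi is the value shifted left by one with a zero tag bit, so
// tagging is simply value + value; using add rather than lsl presumably lets
// callers pass SetCC and detect overflow of the 31-bit payload via the V flag.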
2074 
SmiTst(Register value)2075 void MacroAssembler::SmiTst(Register value) {
2076   tst(value, Operand(kSmiTagMask));
2077 }
2078 
JumpIfSmi(Register value,Label * smi_label)2079 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
2080   tst(value, Operand(kSmiTagMask));
2081   b(eq, smi_label);
2082 }
2083 
JumpIfEqual(Register x,int32_t y,Label * dest)2084 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
2085   cmp(x, Operand(y));
2086   b(eq, dest);
2087 }
2088 
JumpIfLessThan(Register x,int32_t y,Label * dest)2089 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
2090   cmp(x, Operand(y));
2091   b(lt, dest);
2092 }
2093 
JumpIfNotSmi(Register value,Label * not_smi_label)2094 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
2095   tst(value, Operand(kSmiTagMask));
2096   b(ne, not_smi_label);
2097 }
2098 
AssertNotSmi(Register object)2099 void MacroAssembler::AssertNotSmi(Register object) {
2100   if (emit_debug_code()) {
2101     STATIC_ASSERT(kSmiTag == 0);
2102     tst(object, Operand(kSmiTagMask));
2103     Check(ne, AbortReason::kOperandIsASmi);
2104   }
2105 }
2106 
AssertSmi(Register object)2107 void MacroAssembler::AssertSmi(Register object) {
2108   if (emit_debug_code()) {
2109     STATIC_ASSERT(kSmiTag == 0);
2110     tst(object, Operand(kSmiTagMask));
2111     Check(eq, AbortReason::kOperandIsNotASmi);
2112   }
2113 }
2114 
AssertConstructor(Register object)2115 void MacroAssembler::AssertConstructor(Register object) {
2116   if (emit_debug_code()) {
2117     STATIC_ASSERT(kSmiTag == 0);
2118     tst(object, Operand(kSmiTagMask));
2119     Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
2120     push(object);
2121     LoadMap(object, object);
2122     ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
2123     tst(object, Operand(Map::Bits1::IsConstructorBit::kMask));
2124     pop(object);
2125     Check(ne, AbortReason::kOperandIsNotAConstructor);
2126   }
2127 }
2128 
AssertFunction(Register object)2129 void MacroAssembler::AssertFunction(Register object) {
2130   if (emit_debug_code()) {
2131     STATIC_ASSERT(kSmiTag == 0);
2132     tst(object, Operand(kSmiTagMask));
2133     Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2134     push(object);
2135     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2136     pop(object);
2137     Check(eq, AbortReason::kOperandIsNotAFunction);
2138   }
2139 }
2140 
AssertBoundFunction(Register object)2141 void MacroAssembler::AssertBoundFunction(Register object) {
2142   if (emit_debug_code()) {
2143     STATIC_ASSERT(kSmiTag == 0);
2144     tst(object, Operand(kSmiTagMask));
2145     Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
2146     push(object);
2147     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2148     pop(object);
2149     Check(eq, AbortReason::kOperandIsNotABoundFunction);
2150   }
2151 }
2152 
AssertGeneratorObject(Register object)2153 void MacroAssembler::AssertGeneratorObject(Register object) {
2154   if (!emit_debug_code()) return;
2155   tst(object, Operand(kSmiTagMask));
2156   Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2157 
2158   // Load map
2159   Register map = object;
2160   push(object);
2161   LoadMap(map, object);
2162 
2163   // Check if JSGeneratorObject
2164   Label do_check;
2165   Register instance_type = object;
2166   CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
2167   b(eq, &do_check);
2168 
2169   // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
2170   cmp(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
2171   b(eq, &do_check);
2172 
2173   // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
2174   cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
2175 
2176   bind(&do_check);
2177   // Restore generator object to register and perform assertion
2178   pop(object);
2179   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
2180 }
2181 
AssertUndefinedOrAllocationSite(Register object,Register scratch)2182 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2183                                                      Register scratch) {
2184   if (emit_debug_code()) {
2185     Label done_checking;
2186     AssertNotSmi(object);
2187     CompareRoot(object, RootIndex::kUndefinedValue);
2188     b(eq, &done_checking);
2189     LoadMap(scratch, object);
2190     CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2191     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2192     bind(&done_checking);
2193   }
2194 }
2195 
CheckFor32DRegs(Register scratch)2196 void TurboAssembler::CheckFor32DRegs(Register scratch) {
2197   Move(scratch, ExternalReference::cpu_features());
2198   ldr(scratch, MemOperand(scratch));
2199   tst(scratch, Operand(1u << VFP32DREGS));
2200 }
2201 
SaveFPRegs(Register location,Register scratch)2202 void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
2203   CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2204   CheckFor32DRegs(scratch);
2205   vstm(db_w, location, d16, d31, ne);
2206   sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2207   vstm(db_w, location, d0, d15);
2208 }
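
// CheckFor32DRegs leaves the flags set so that the "ne" forms above (and in
// the helpers below) only touch d16-d31 when the CPU really has 32 D
// registers, while the "eq" add/sub merely skips the space those registers
// would have used, keeping the layout identical either way.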
2209 
RestoreFPRegs(Register location,Register scratch)2210 void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
2211   CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2212   CheckFor32DRegs(scratch);
2213   vldm(ia_w, location, d0, d15);
2214   vldm(ia_w, location, d16, d31, ne);
2215   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2216 }
2217 
SaveFPRegsToHeap(Register location,Register scratch)2218 void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
2219   CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2220   CheckFor32DRegs(scratch);
2221   vstm(ia_w, location, d0, d15);
2222   vstm(ia_w, location, d16, d31, ne);
2223   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2224 }
2225 
RestoreFPRegsFromHeap(Register location,Register scratch)2226 void TurboAssembler::RestoreFPRegsFromHeap(Register location,
2227                                            Register scratch) {
2228   CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2229   CheckFor32DRegs(scratch);
2230   vldm(ia_w, location, d0, d15);
2231   vldm(ia_w, location, d16, d31, ne);
2232   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2233 }
2234 
2235 template <typename T>
FloatMaxHelper(T result,T left,T right,Label * out_of_line)2236 void TurboAssembler::FloatMaxHelper(T result, T left, T right,
2237                                     Label* out_of_line) {
2238   // This trivial case is caught sooner, so that the out-of-line code can be
2239   // completely avoided.
2240   DCHECK(left != right);
2241 
2242   if (CpuFeatures::IsSupported(ARMv8)) {
2243     CpuFeatureScope scope(this, ARMv8);
2244     VFPCompareAndSetFlags(left, right);
2245     b(vs, out_of_line);
2246     vmaxnm(result, left, right);
2247   } else {
2248     Label done;
2249     VFPCompareAndSetFlags(left, right);
2250     b(vs, out_of_line);
2251     // Avoid a conditional instruction if the result register is unique.
2252     bool aliased_result_reg = result == left || result == right;
2253     Move(result, right, aliased_result_reg ? mi : al);
2254     Move(result, left, gt);
2255     b(ne, &done);
2256     // Left and right are equal, but check for +/-0.
2257     VFPCompareAndSetFlags(left, 0.0);
2258     b(eq, out_of_line);
2259     // The arguments are equal and not zero, so it doesn't matter which input we
2260     // pick. We have already moved one input into the result (if it didn't
2261     // already alias) so there's nothing more to do.
2262     bind(&done);
2263   }
2264 }
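
// In the pre-ARMv8 path the flags from VFPCompareAndSetFlags drive the
// selection: "vs" (unordered) means a NaN is involved, "gt" picks left, "mi"
// picks right, and "eq" falls through to the +/-0 check, since the comparison
// cannot distinguish +0 from -0.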
2265 
2266 template <typename T>
FloatMaxOutOfLineHelper(T result,T left,T right)2267 void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
2268   DCHECK(left != right);
2269 
2270   // ARMv8: At least one of left and right is a NaN.
2271   // Anything else: At least one of left and right is a NaN, or both left and
2272   // right are zeroes with unknown sign.
2273 
2274   // If left and right are +/-0, select the one with the most positive sign.
2275   // If left or right are NaN, vadd propagates the appropriate one.
2276   vadd(result, left, right);
2277 }
2278 
2279 template <typename T>
FloatMinHelper(T result,T left,T right,Label * out_of_line)2280 void TurboAssembler::FloatMinHelper(T result, T left, T right,
2281                                     Label* out_of_line) {
2282   // This trivial case is caught sooner, so that the out-of-line code can be
2283   // completely avoided.
2284   DCHECK(left != right);
2285 
2286   if (CpuFeatures::IsSupported(ARMv8)) {
2287     CpuFeatureScope scope(this, ARMv8);
2288     VFPCompareAndSetFlags(left, right);
2289     b(vs, out_of_line);
2290     vminnm(result, left, right);
2291   } else {
2292     Label done;
2293     VFPCompareAndSetFlags(left, right);
2294     b(vs, out_of_line);
2295     // Avoid a conditional instruction if the result register is unique.
2296     bool aliased_result_reg = result == left || result == right;
2297     Move(result, left, aliased_result_reg ? mi : al);
2298     Move(result, right, gt);
2299     b(ne, &done);
2300     // Left and right are equal, but check for +/-0.
2301     VFPCompareAndSetFlags(left, 0.0);
2302     // If the arguments are equal and not zero, it doesn't matter which input we
2303     // pick. We have already moved one input into the result (if it didn't
2304     // already alias) so there's nothing more to do.
2305     b(ne, &done);
2306     // At this point, both left and right are either 0 or -0.
2307     // We could use a single 'vorr' instruction here if we had NEON support.
2308     // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
2309     // as -((-L) - R).
2310     if (left == result) {
2311       DCHECK(right != result);
2312       vneg(result, left);
2313       vsub(result, result, right);
2314       vneg(result, result);
2315     } else {
2316       DCHECK(left != result);
2317       vneg(result, right);
2318       vsub(result, result, left);
2319       vneg(result, result);
2320     }
2321     bind(&done);
2322   }
2323 }
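
// The -((-L) - R) trick relies on IEEE signed-zero addition: a sum is -0 only
// when both operands are -0, so (-L) + (-R) is -0 exactly when L and R are
// both +0. Negating it therefore gives +0 in that case and -0 whenever at
// least one input was -0, which is the correct minimum for any mix of zeros.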
2324 
2325 template <typename T>
FloatMinOutOfLineHelper(T result,T left,T right)2326 void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
2327   DCHECK(left != right);
2328 
2329   // At least one of left and right is a NaN. Use vadd to propagate the NaN
2330   // appropriately. +/-0 is handled inline.
2331   vadd(result, left, right);
2332 }
2333 
FloatMax(SwVfpRegister result,SwVfpRegister left,SwVfpRegister right,Label * out_of_line)2334 void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
2335                               SwVfpRegister right, Label* out_of_line) {
2336   FloatMaxHelper(result, left, right, out_of_line);
2337 }
2338 
FloatMin(SwVfpRegister result,SwVfpRegister left,SwVfpRegister right,Label * out_of_line)2339 void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
2340                               SwVfpRegister right, Label* out_of_line) {
2341   FloatMinHelper(result, left, right, out_of_line);
2342 }
2343 
FloatMax(DwVfpRegister result,DwVfpRegister left,DwVfpRegister right,Label * out_of_line)2344 void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
2345                               DwVfpRegister right, Label* out_of_line) {
2346   FloatMaxHelper(result, left, right, out_of_line);
2347 }
2348 
FloatMin(DwVfpRegister result,DwVfpRegister left,DwVfpRegister right,Label * out_of_line)2349 void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
2350                               DwVfpRegister right, Label* out_of_line) {
2351   FloatMinHelper(result, left, right, out_of_line);
2352 }
2353 
FloatMaxOutOfLine(SwVfpRegister result,SwVfpRegister left,SwVfpRegister right)2354 void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
2355                                        SwVfpRegister right) {
2356   FloatMaxOutOfLineHelper(result, left, right);
2357 }
2358 
FloatMinOutOfLine(SwVfpRegister result,SwVfpRegister left,SwVfpRegister right)2359 void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
2360                                        SwVfpRegister right) {
2361   FloatMinOutOfLineHelper(result, left, right);
2362 }
2363 
FloatMaxOutOfLine(DwVfpRegister result,DwVfpRegister left,DwVfpRegister right)2364 void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
2365                                        DwVfpRegister right) {
2366   FloatMaxOutOfLineHelper(result, left, right);
2367 }
2368 
FloatMinOutOfLine(DwVfpRegister result,DwVfpRegister left,DwVfpRegister right)2369 void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
2370                                        DwVfpRegister right) {
2371   FloatMinOutOfLineHelper(result, left, right);
2372 }
2373 
2374 static const int kRegisterPassedArguments = 4;
2375 // The hardfloat calling convention passes double arguments in registers d0-d7.
2376 static const int kDoubleRegisterPassedArguments = 8;
2377 
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)2378 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
2379                                               int num_double_arguments) {
2380   int stack_passed_words = 0;
2381   if (use_eabi_hardfloat()) {
2382     // In the hard floating point calling convention, we can use the first 8
2383     // registers to pass doubles.
2384     if (num_double_arguments > kDoubleRegisterPassedArguments) {
2385       stack_passed_words +=
2386           2 * (num_double_arguments - kDoubleRegisterPassedArguments);
2387     }
2388   } else {
2389     // In the soft floating point calling convention, every double
2390     // argument is passed using two registers.
2391     num_reg_arguments += 2 * num_double_arguments;
2392   }
2393   // Up to four simple arguments are passed in registers r0..r3.
2394   if (num_reg_arguments > kRegisterPassedArguments) {
2395     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2396   }
2397   return stack_passed_words;
2398 }
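
// For example, with the hardfloat ABI 6 integer and 9 double arguments need
// (6 - 4) + 2 * (9 - 8) = 4 stack words, while with the softfloat ABI 2
// integer and 3 double arguments occupy 2 + 2 * 3 = 8 "register" slots, so 4
// words spill to the stack.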
2399 
PrepareCallCFunction(int num_reg_arguments,int num_double_arguments,Register scratch)2400 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2401                                           int num_double_arguments,
2402                                           Register scratch) {
2403   int frame_alignment = ActivationFrameAlignment();
2404   int stack_passed_arguments =
2405       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2406   if (frame_alignment > kPointerSize) {
2407     UseScratchRegisterScope temps(this);
2408     if (!scratch.is_valid()) scratch = temps.Acquire();
2409     // Make the stack end at the alignment boundary and make room for the
2410     // stack-passed arguments and the original value of sp.
2411     mov(scratch, sp);
2412     AllocateStackSpace((stack_passed_arguments + 1) * kPointerSize);
2413     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2414     and_(sp, sp, Operand(-frame_alignment));
2415     str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2416   } else if (stack_passed_arguments > 0) {
2417     AllocateStackSpace(stack_passed_arguments * kPointerSize);
2418   }
2419 }
2420 
MovToFloatParameter(DwVfpRegister src)2421 void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
2422   DCHECK(src == d0);
2423   if (!use_eabi_hardfloat()) {
2424     vmov(r0, r1, src);
2425   }
2426 }
2427 
2428 // On ARM this is just a synonym to make the purpose clear.
MovToFloatResult(DwVfpRegister src)2429 void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
2430   MovToFloatParameter(src);
2431 }
2432 
MovToFloatParameters(DwVfpRegister src1,DwVfpRegister src2)2433 void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
2434                                           DwVfpRegister src2) {
2435   DCHECK(src1 == d0);
2436   DCHECK(src2 == d1);
2437   if (!use_eabi_hardfloat()) {
2438     vmov(r0, r1, src1);
2439     vmov(r2, r3, src2);
2440   }
2441 }
2442 
CallCFunction(ExternalReference function,int num_reg_arguments,int num_double_arguments)2443 void TurboAssembler::CallCFunction(ExternalReference function,
2444                                    int num_reg_arguments,
2445                                    int num_double_arguments) {
2446   UseScratchRegisterScope temps(this);
2447   Register scratch = temps.Acquire();
2448   Move(scratch, function);
2449   CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
2450 }
2451 
CallCFunction(Register function,int num_reg_arguments,int num_double_arguments)2452 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2453                                    int num_double_arguments) {
2454   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
2455 }
2456 
CallCFunction(ExternalReference function,int num_arguments)2457 void TurboAssembler::CallCFunction(ExternalReference function,
2458                                    int num_arguments) {
2459   CallCFunction(function, num_arguments, 0);
2460 }
2461 
CallCFunction(Register function,int num_arguments)2462 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2463   CallCFunction(function, num_arguments, 0);
2464 }
2465 
CallCFunctionHelper(Register function,int num_reg_arguments,int num_double_arguments)2466 void TurboAssembler::CallCFunctionHelper(Register function,
2467                                          int num_reg_arguments,
2468                                          int num_double_arguments) {
2469   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2470   DCHECK(has_frame());
2471   // Make sure that the stack is aligned before calling a C function unless
2472   // running in the simulator. The simulator has its own alignment check which
2473   // provides more information.
2474 #if V8_HOST_ARCH_ARM
2475   if (emit_debug_code()) {
2476     int frame_alignment = base::OS::ActivationFrameAlignment();
2477     int frame_alignment_mask = frame_alignment - 1;
2478     if (frame_alignment > kPointerSize) {
2479       DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2480       Label alignment_as_expected;
2481       tst(sp, Operand(frame_alignment_mask));
2482       b(eq, &alignment_as_expected);
2483       // Don't use Check here, as it will call Runtime_Abort possibly
2484       // re-entering here.
2485       stop();
2486       bind(&alignment_as_expected);
2487     }
2488   }
2489 #endif
2490 
2491   // Save the frame pointer and PC so that the stack layout remains iterable,
2492   // even without an ExitFrame, which normally exists between JS and C frames.
2493   Register addr_scratch = r4;
2494   // See x64 code for reasoning about how to address the isolate data fields.
2495   if (root_array_available()) {
2496     str(pc,
2497         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
2498     str(fp,
2499         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2500   } else {
2501     DCHECK_NOT_NULL(isolate());
2502     Push(addr_scratch);
2503 
2504     Move(addr_scratch,
2505          ExternalReference::fast_c_call_caller_pc_address(isolate()));
2506     str(pc, MemOperand(addr_scratch));
2507     Move(addr_scratch,
2508          ExternalReference::fast_c_call_caller_fp_address(isolate()));
2509     str(fp, MemOperand(addr_scratch));
2510 
2511     Pop(addr_scratch);
2512   }
2513 
2514   // Just call directly. The function called cannot cause a GC, or
2515   // allow preemption, so the return address in the link register
2516   // stays correct.
2517   Call(function);
2518 
2519   // We don't unset the PC; the FP is the source of truth.
2520   Register zero_scratch = r5;
2521   Push(zero_scratch);
2522   mov(zero_scratch, Operand::Zero());
2523 
2524   if (root_array_available()) {
2525     str(zero_scratch,
2526         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2527   } else {
2528     DCHECK_NOT_NULL(isolate());
2529     Push(addr_scratch);
2530     Move(addr_scratch,
2531          ExternalReference::fast_c_call_caller_fp_address(isolate()));
2532     str(zero_scratch, MemOperand(addr_scratch));
2533     Pop(addr_scratch);
2534   }
2535 
2536   Pop(zero_scratch);
2537 
2538   int stack_passed_arguments =
2539       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2540   if (ActivationFrameAlignment() > kPointerSize) {
2541     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
2542   } else {
2543     add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2544   }
2545 }
2546 
CheckPageFlag(Register object,int mask,Condition cc,Label * condition_met)2547 void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
2548                                    Label* condition_met) {
2549   UseScratchRegisterScope temps(this);
2550   Register scratch = temps.Acquire();
2551   DCHECK(cc == eq || cc == ne);
2552   Bfc(scratch, object, 0, kPageSizeBits);
2553   ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
2554   tst(scratch, Operand(mask));
2555   b(cc, condition_met);
2556 }
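
// Bfc clears the low kPageSizeBits bits of the object's address, which yields
// the start of its page (the BasicMemoryChunk header), so the flags word can
// be loaded at a fixed offset and tested against the mask.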
2557 
GetRegisterThatIsNotOneOf(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6)2558 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2559                                    Register reg4, Register reg5,
2560                                    Register reg6) {
2561   RegList regs = 0;
2562   if (reg1.is_valid()) regs |= reg1.bit();
2563   if (reg2.is_valid()) regs |= reg2.bit();
2564   if (reg3.is_valid()) regs |= reg3.bit();
2565   if (reg4.is_valid()) regs |= reg4.bit();
2566   if (reg5.is_valid()) regs |= reg5.bit();
2567   if (reg6.is_valid()) regs |= reg6.bit();
2568 
2569   const RegisterConfiguration* config = RegisterConfiguration::Default();
2570   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2571     int code = config->GetAllocatableGeneralCode(i);
2572     Register candidate = Register::from_code(code);
2573     if (regs & candidate.bit()) continue;
2574     return candidate;
2575   }
2576   UNREACHABLE();
2577 }
2578 
ComputeCodeStartAddress(Register dst)2579 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2580   // Reading pc on ARM yields the current instruction's address plus 8.
2581   sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
2582 }
2583 
ResetSpeculationPoisonRegister()2584 void TurboAssembler::ResetSpeculationPoisonRegister() {
2585   mov(kSpeculationPoisonRegister, Operand(-1));
2586 }
2587 
CallForDeoptimization(Builtins::Name target,int,Label * exit,DeoptimizeKind kind,Label *)2588 void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
2589                                            Label* exit, DeoptimizeKind kind,
2590                                            Label*) {
2591   BlockConstPoolScope block_const_pool(this);
2592   ldr(ip, MemOperand(kRootRegister,
2593                      IsolateData::builtin_entry_slot_offset(target)));
2594   Call(ip);
2595   DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
2596             (kind == DeoptimizeKind::kLazy)
2597                 ? Deoptimizer::kLazyDeoptExitSize
2598                 : Deoptimizer::kNonLazyDeoptExitSize);
2599   USE(exit, kind);
2600 }
2601 
Trap()2602 void TurboAssembler::Trap() { stop(); }
DebugBreak()2603 void TurboAssembler::DebugBreak() { stop(); }
2604 
2605 }  // namespace internal
2606 }  // namespace v8
2607 
2608 #endif  // V8_TARGET_ARCH_ARM
2609