1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <assert.h>  // For assert
6 #include <limits.h>  // For LONG_MIN, LONG_MAX.
7 
8 #if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
9 
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/codegen/callable.h"
13 #include "src/codegen/code-factory.h"
14 #include "src/codegen/external-reference-table.h"
15 #include "src/codegen/interface-descriptors-inl.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/debug/debug.h"
19 #include "src/deoptimizer/deoptimizer.h"
20 #include "src/execution/frames-inl.h"
21 #include "src/heap/memory-chunk.h"
22 #include "src/init/bootstrapper.h"
23 #include "src/logging/counters.h"
24 #include "src/runtime/runtime.h"
25 #include "src/snapshot/snapshot.h"
26 
27 #if V8_ENABLE_WEBASSEMBLY
28 #include "src/wasm/wasm-code-manager.h"
29 #endif  // V8_ENABLE_WEBASSEMBLY
30 
31 // Satisfy cpplint check, but don't include platform-specific header. It is
32 // included recursively via macro-assembler.h.
33 #if 0
34 #include "src/codegen/ppc/macro-assembler-ppc.h"
35 #endif
36 
37 namespace v8 {
38 namespace internal {
39 
40 namespace {
41 
42 // Simd and Floating Point registers are not shared. For WebAssembly we save
43 // both register sets. If we are not running Wasm, we can get away with only
44 // saving FP registers.
45 #if V8_ENABLE_WEBASSEMBLY
46 constexpr int kStackSavedSavedFPSizeInBytes =
47     (kNumCallerSavedDoubles * kSimd128Size) +
48     (kNumCallerSavedDoubles * kDoubleSize);
49 #else
50 constexpr int kStackSavedSavedFPSizeInBytes =
51     kNumCallerSavedDoubles * kDoubleSize;
52 #endif  // V8_ENABLE_WEBASSEMBLY
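// In other words, each caller-saved double register costs
// kSimd128Size + kDoubleSize bytes of stack when Wasm support is compiled in,
// and kDoubleSize bytes otherwise.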
53 
54 }  // namespace
55 
56 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
57                                                     Register exclusion1,
58                                                     Register exclusion2,
59                                                     Register exclusion3) const {
60   int bytes = 0;
61 
62   RegList exclusions = {exclusion1, exclusion2, exclusion3};
63   RegList list = kJSCallerSaved - exclusions;
64   bytes += list.Count() * kSystemPointerSize;
65 
66   if (fp_mode == SaveFPRegsMode::kSave) {
67     bytes += kStackSavedSavedFPSizeInBytes;
68   }
69 
70   return bytes;
71 }
72 
73 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
74                                     Register exclusion2, Register exclusion3) {
75   int bytes = 0;
76 
77   RegList exclusions = {exclusion1, exclusion2, exclusion3};
78   RegList list = kJSCallerSaved - exclusions;
79   MultiPush(list);
80   bytes += list.Count() * kSystemPointerSize;
81 
82   if (fp_mode == SaveFPRegsMode::kSave) {
83     MultiPushF64AndV128(kCallerSavedDoubles, kCallerSavedSimd128s);
84     bytes += kStackSavedSavedFPSizeInBytes;
85   }
86 
87   return bytes;
88 }
89 
90 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
91                                    Register exclusion2, Register exclusion3) {
92   int bytes = 0;
93   if (fp_mode == SaveFPRegsMode::kSave) {
94     MultiPopF64AndV128(kCallerSavedDoubles, kCallerSavedSimd128s);
95     bytes += kStackSavedSavedFPSizeInBytes;
96   }
97 
98   RegList exclusions = {exclusion1, exclusion2, exclusion3};
99   RegList list = kJSCallerSaved - exclusions;
100   MultiPop(list);
101   bytes += list.Count() * kSystemPointerSize;
102 
103   return bytes;
104 }
105 
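// Jump and Call below use the standard PPC indirect-branch idiom: mtctr moves
// the target address into the count register and bctr/bctrl branches to it
// (bctrl also sets the link register for the return).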
106 void TurboAssembler::Jump(Register target) {
107   mtctr(target);
108   bctr();
109 }
110 
111 void TurboAssembler::LoadFromConstantsTable(Register destination,
112                                             int constant_index) {
113   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
114 
115   DCHECK_NE(destination, r0);
116   LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
117   LoadTaggedPointerField(
118       destination,
119       FieldMemOperand(destination,
120                       FixedArray::OffsetOfElementAt(constant_index)),
121       r0);
122 }
123 
124 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
125   LoadU64(destination, MemOperand(kRootRegister, offset), r0);
126 }
127 
128 void TurboAssembler::LoadRootRegisterOffset(Register destination,
129                                             intptr_t offset) {
130   if (offset == 0) {
131     mr(destination, kRootRegister);
132   } else {
133     AddS64(destination, kRootRegister, Operand(offset), destination);
134   }
135 }
136 
137 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
138                           Condition cond, CRegister cr) {
139   Label skip;
140 
141   if (cond != al) b(NegateCondition(cond), &skip, cr);
142 
143   mov(ip, Operand(target, rmode));
144   mtctr(ip);
145   bctr();
146 
147   bind(&skip);
148 }
149 
150 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
151                           CRegister cr) {
152   DCHECK(!RelocInfo::IsCodeTarget(rmode));
153   Jump(static_cast<intptr_t>(target), rmode, cond, cr);
154 }
155 
156 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
157                           Condition cond, CRegister cr) {
158   DCHECK(RelocInfo::IsCodeTarget(rmode));
159   DCHECK_IMPLIES(options().isolate_independent_code,
160                  Builtins::IsIsolateIndependentBuiltin(*code));
161 
162   Builtin builtin = Builtin::kNoBuiltinId;
163   bool target_is_builtin =
164       isolate()->builtins()->IsBuiltinHandle(code, &builtin);
165 
166   if (root_array_available_ && options().isolate_independent_code) {
167     Label skip;
168     Register scratch = ip;
169     int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
170     LoadU64(scratch, MemOperand(kRootRegister, offset), r0);
171     if (cond != al) b(NegateCondition(cond), &skip, cr);
172     Jump(scratch);
173     bind(&skip);
174     return;
175   } else if (options().inline_offheap_trampolines && target_is_builtin) {
176     // Inline the trampoline.
177     Label skip;
178     RecordCommentForOffHeapTrampoline(builtin);
179     // Use ip directly instead of using UseScratchRegisterScope, as we do
180     // not preserve scratch registers across calls.
181     mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
182     if (cond != al) b(NegateCondition(cond), &skip, cr);
183     Jump(ip);
184     bind(&skip);
185     return;
186   }
187   int32_t target_index = AddCodeTarget(code);
188   Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
189 }
190 
191 void TurboAssembler::Jump(const ExternalReference& reference) {
192   UseScratchRegisterScope temps(this);
193   Register scratch = temps.Acquire();
194   Move(scratch, reference);
195   if (ABI_USES_FUNCTION_DESCRIPTORS) {
196     // AIX uses a function descriptor. When calling C code be
197     // aware of this descriptor and pick up values from it.
198     LoadU64(ToRegister(ABI_TOC_REGISTER),
199             MemOperand(scratch, kSystemPointerSize));
200     LoadU64(scratch, MemOperand(scratch, 0));
201   }
202   Jump(scratch);
203 }
204 
205 void TurboAssembler::Call(Register target) {
206   BlockTrampolinePoolScope block_trampoline_pool(this);
207   // branch via link register and set LK bit for return point
208   mtctr(target);
209   bctrl();
210 }
211 
212 void MacroAssembler::CallJSEntry(Register target) {
213   CHECK(target == r5);
214   Call(target);
215 }
216 
217 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
218                                                    RelocInfo::Mode rmode,
219                                                    Condition cond) {
220   return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
221 }
222 
223 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
224                           Condition cond) {
225   BlockTrampolinePoolScope block_trampoline_pool(this);
226   DCHECK(cond == al);
227 
228   // This can likely be optimized to use bc() with a 24-bit relative offset:
229   //
230   // RecordRelocInfo(x.rmode_, x.immediate);
231   // bc( BA, .... offset, LKset);
232   //
233 
234   mov(ip, Operand(target, rmode));
235   mtctr(ip);
236   bctrl();
237 }
238 
239 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
240                           Condition cond) {
241   BlockTrampolinePoolScope block_trampoline_pool(this);
242   DCHECK(RelocInfo::IsCodeTarget(rmode));
243   DCHECK_IMPLIES(options().isolate_independent_code,
244                  Builtins::IsIsolateIndependentBuiltin(*code));
245   DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
246                  Builtins::IsIsolateIndependentBuiltin(*code));
247 
248   Builtin builtin = Builtin::kNoBuiltinId;
249   bool target_is_builtin =
250       isolate()->builtins()->IsBuiltinHandle(code, &builtin);
251 
252   if (root_array_available_ && options().isolate_independent_code) {
253     Label skip;
254     int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
255     LoadU64(ip, MemOperand(kRootRegister, offset));
256     if (cond != al) b(NegateCondition(cond), &skip);
257     Call(ip);
258     bind(&skip);
259     return;
260   } else if (options().inline_offheap_trampolines && target_is_builtin) {
261     // Inline the trampoline.
262     CallBuiltin(builtin, cond);
263     return;
264   }
265   DCHECK(code->IsExecutable());
266   int32_t target_index = AddCodeTarget(code);
267   Call(static_cast<Address>(target_index), rmode, cond);
268 }
269 
270 void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
271   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
272   DCHECK(Builtins::IsBuiltinId(builtin));
273   // Use ip directly instead of using UseScratchRegisterScope, as we do not
274   // preserve scratch registers across calls.
275   mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
276   Label skip;
277   if (cond != al) b(NegateCondition(cond), &skip);
278   Call(ip);
279   bind(&skip);
280 }
281 
282 void TurboAssembler::TailCallBuiltin(Builtin builtin) {
283   ASM_CODE_COMMENT_STRING(this,
284                           CommentForOffHeapTrampoline("tail call", builtin));
285   mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
286   Jump(ip);
287 }
288 
289 void TurboAssembler::Drop(int count) {
290   if (count > 0) {
291     AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
292   }
293 }
294 
295 void TurboAssembler::Drop(Register count, Register scratch) {
296   ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
297   add(sp, sp, scratch);
298 }
299 
300 void TurboAssembler::Call(Label* target) { b(target, SetLK); }
301 
302 void TurboAssembler::Push(Handle<HeapObject> handle) {
303   mov(r0, Operand(handle));
304   push(r0);
305 }
306 
307 void TurboAssembler::Push(Smi smi) {
308   mov(r0, Operand(smi));
309   push(r0);
310 }
311 
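// PushArray copies |size| pointer-sized elements from |array| onto the stack.
// In kNormal order the elements are pushed starting from the end of the
// array, so array[0] ends up at the lowest stack address; the reverse order
// pushes from the start of the array instead.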
312 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
313                                Register scratch2, PushArrayOrder order) {
314   Label loop, done;
315 
316   if (order == kNormal) {
317     cmpi(size, Operand::Zero());
318     beq(&done);
319     ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
320     add(scratch, array, scratch);
321     mtctr(size);
322 
323     bind(&loop);
324     LoadU64WithUpdate(scratch2, MemOperand(scratch, -kSystemPointerSize));
325     StoreU64WithUpdate(scratch2, MemOperand(sp, -kSystemPointerSize));
326     bdnz(&loop);
327 
328     bind(&done);
329   } else {
330     cmpi(size, Operand::Zero());
331     beq(&done);
332 
333     mtctr(size);
334     subi(scratch, array, Operand(kSystemPointerSize));
335 
336     bind(&loop);
337     LoadU64WithUpdate(scratch2, MemOperand(scratch, kSystemPointerSize));
338     StoreU64WithUpdate(scratch2, MemOperand(sp, -kSystemPointerSize));
339     bdnz(&loop);
340     bind(&done);
341   }
342 }
343 
344 void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
345                           RelocInfo::Mode rmode) {
346   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
347   // non-isolate-independent code. In many cases it might be cheaper than
348   // embedding the relocatable value.
349   if (root_array_available_ && options().isolate_independent_code) {
350     IndirectLoadConstant(dst, value);
351     return;
352   } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
353     EmbeddedObjectIndex index = AddEmbeddedObject(value);
354     DCHECK(is_uint32(index));
355     mov(dst, Operand(static_cast<int>(index), rmode));
356   } else {
357     DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
358     mov(dst, Operand(value.address(), rmode));
359   }
360 }
361 
362 void TurboAssembler::Move(Register dst, ExternalReference reference) {
363   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
364   // non-isolate-independent code. In many cases it might be cheaper than
365   // embedding the relocatable value.
366   if (root_array_available_ && options().isolate_independent_code) {
367     IndirectLoadExternalReference(dst, reference);
368     return;
369   }
370   mov(dst, Operand(reference));
371 }
372 
373 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
374   DCHECK(cond == al);
375   if (dst != src) {
376     mr(dst, src);
377   }
378 }
379 
380 void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
381   if (dst != src) {
382     fmr(dst, src);
383   }
384 }
385 
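// MultiPush stores the selected registers at decreasing offsets while walking
// register numbers downwards, so the lowest-numbered register lands at the
// lowest stack address; MultiPop below restores them in the opposite order.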
386 void TurboAssembler::MultiPush(RegList regs, Register location) {
387   int16_t num_to_push = regs.Count();
388   int16_t stack_offset = num_to_push * kSystemPointerSize;
389 
390   subi(location, location, Operand(stack_offset));
391   for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
392     if ((regs.bits() & (1 << i)) != 0) {
393       stack_offset -= kSystemPointerSize;
394       StoreU64(ToRegister(i), MemOperand(location, stack_offset));
395     }
396   }
397 }
398 
399 void TurboAssembler::MultiPop(RegList regs, Register location) {
400   int16_t stack_offset = 0;
401 
402   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
403     if ((regs.bits() & (1 << i)) != 0) {
404       LoadU64(ToRegister(i), MemOperand(location, stack_offset));
405       stack_offset += kSystemPointerSize;
406     }
407   }
408   addi(location, location, Operand(stack_offset));
409 }
410 
411 void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
412   int16_t num_to_push = dregs.Count();
413   int16_t stack_offset = num_to_push * kDoubleSize;
414 
415   subi(location, location, Operand(stack_offset));
416   for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
417     if ((dregs.bits() & (1 << i)) != 0) {
418       DoubleRegister dreg = DoubleRegister::from_code(i);
419       stack_offset -= kDoubleSize;
420       stfd(dreg, MemOperand(location, stack_offset));
421     }
422   }
423 }
424 
425 void TurboAssembler::MultiPushV128(Simd128RegList simd_regs,
426                                    Register location) {
427   int16_t num_to_push = simd_regs.Count();
428   int16_t stack_offset = num_to_push * kSimd128Size;
429 
430   subi(location, location, Operand(stack_offset));
431   for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
432     if ((simd_regs.bits() & (1 << i)) != 0) {
433       Simd128Register simd_reg = Simd128Register::from_code(i);
434       stack_offset -= kSimd128Size;
435       li(ip, Operand(stack_offset));
436       StoreSimd128(simd_reg, MemOperand(location, ip));
437     }
438   }
439 }
440 
441 void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
442   int16_t stack_offset = 0;
443 
444   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
445     if ((dregs.bits() & (1 << i)) != 0) {
446       DoubleRegister dreg = DoubleRegister::from_code(i);
447       lfd(dreg, MemOperand(location, stack_offset));
448       stack_offset += kDoubleSize;
449     }
450   }
451   addi(location, location, Operand(stack_offset));
452 }
453 
454 void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register location) {
455   int16_t stack_offset = 0;
456 
457   for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
458     if ((simd_regs.bits() & (1 << i)) != 0) {
459       Simd128Register simd_reg = Simd128Register::from_code(i);
460       li(ip, Operand(stack_offset));
461       LoadSimd128(simd_reg, MemOperand(location, ip));
462       stack_offset += kSimd128Size;
463     }
464   }
465   addi(location, location, Operand(stack_offset));
466 }
467 
468 void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
469                                          Simd128RegList simd_regs,
470                                          Register location) {
471   MultiPushDoubles(dregs);
472 #if V8_ENABLE_WEBASSEMBLY
473   bool generating_builtins =
474       isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
475   if (generating_builtins) {
476     // V8 uses the same set of fp param registers as Simd param registers.
477     // As these registers are two different sets on ppc we must make
478     // sure to also save them when Simd is enabled.
479     // Check the comments under crrev.com/c/2645694 for more details.
480     Label push_empty_simd, simd_pushed;
481     Move(ip, ExternalReference::supports_wasm_simd_128_address());
482     LoadU8(ip, MemOperand(ip), r0);
483     cmpi(ip, Operand::Zero());  // If > 0 then simd is available.
484     ble(&push_empty_simd);
485     MultiPushV128(simd_regs);
486     b(&simd_pushed);
487     bind(&push_empty_simd);
488     // We still need to allocate empty space on the stack even if we
489     // are not pushing Simd registers (see kFixedFrameSizeFromFp).
490     addi(sp, sp,
491          Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
492     bind(&simd_pushed);
493   } else {
494     if (CpuFeatures::SupportsWasmSimd128()) {
495       MultiPushV128(simd_regs);
496     } else {
497       addi(sp, sp,
498            Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
499     }
500   }
501 #endif
502 }
503 
504 void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
505                                         Simd128RegList simd_regs,
506                                         Register location) {
507 #if V8_ENABLE_WEBASSEMBLY
508   bool generating_builtins =
509       isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
510   if (generating_builtins) {
511     Label pop_empty_simd, simd_popped;
512     Move(ip, ExternalReference::supports_wasm_simd_128_address());
513     LoadU8(ip, MemOperand(ip), r0);
514     cmpi(ip, Operand::Zero());  // If > 0 then simd is available.
515     ble(&pop_empty_simd);
516     MultiPopV128(simd_regs);
517     b(&simd_popped);
518     bind(&pop_empty_simd);
519     addi(sp, sp,
520          Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
521     bind(&simd_popped);
522   } else {
523     if (CpuFeatures::SupportsWasmSimd128()) {
524       MultiPopV128(simd_regs);
525     } else {
526       addi(sp, sp,
527            Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
528     }
529   }
530 #endif
531   MultiPopDoubles(dregs);
532 }
533 
534 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
535                               Condition cond) {
536   DCHECK(cond == al);
537   LoadU64(destination,
538           MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
539 }
540 
541 void TurboAssembler::LoadTaggedPointerField(const Register& destination,
542                                             const MemOperand& field_operand,
543                                             const Register& scratch) {
544   if (COMPRESS_POINTERS_BOOL) {
545     DecompressTaggedPointer(destination, field_operand);
546   } else {
547     LoadU64(destination, field_operand, scratch);
548   }
549 }
550 
551 void TurboAssembler::LoadAnyTaggedField(const Register& destination,
552                                         const MemOperand& field_operand,
553                                         const Register& scratch) {
554   if (COMPRESS_POINTERS_BOOL) {
555     DecompressAnyTagged(destination, field_operand);
556   } else {
557     LoadU64(destination, field_operand, scratch);
558   }
559 }
560 
561 void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
562                               Register scratch) {
563   if (SmiValuesAre31Bits()) {
564     LoadU32(dst, src, scratch);
565   } else {
566     LoadU64(dst, src, scratch);
567   }
568 
569   SmiUntag(dst, rc);
570 }
571 
572 void TurboAssembler::StoreTaggedField(const Register& value,
573                                       const MemOperand& dst_field_operand,
574                                       const Register& scratch) {
575   if (COMPRESS_POINTERS_BOOL) {
576     RecordComment("[ StoreTagged");
577     StoreU32(value, dst_field_operand, scratch);
578     RecordComment("]");
579   } else {
580     StoreU64(value, dst_field_operand, scratch);
581   }
582 }
583 
584 void TurboAssembler::DecompressTaggedSigned(Register destination,
585                                             Register src) {
586   RecordComment("[ DecompressTaggedSigned");
587   ZeroExtWord32(destination, src);
588   RecordComment("]");
589 }
590 
591 void TurboAssembler::DecompressTaggedSigned(Register destination,
592                                             MemOperand field_operand) {
593   RecordComment("[ DecompressTaggedSigned");
594   LoadU32(destination, field_operand, r0);
595   RecordComment("]");
596 }
597 
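// DecompressTaggedPointer and DecompressAnyTagged rebuild a full pointer by
// zero-extending the 32-bit compressed value and adding kRootRegister (on
// pointer-compression builds the root register doubles as the compression
// cage base).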
598 void TurboAssembler::DecompressTaggedPointer(Register destination,
599                                              Register source) {
600   RecordComment("[ DecompressTaggedPointer");
601   ZeroExtWord32(destination, source);
602   add(destination, destination, kRootRegister);
603   RecordComment("]");
604 }
605 
606 void TurboAssembler::DecompressTaggedPointer(Register destination,
607                                              MemOperand field_operand) {
608   RecordComment("[ DecompressTaggedPointer");
609   LoadU32(destination, field_operand, r0);
610   add(destination, destination, kRootRegister);
611   RecordComment("]");
612 }
613 
614 void TurboAssembler::DecompressAnyTagged(Register destination,
615                                          MemOperand field_operand) {
616   RecordComment("[ DecompressAnyTagged");
617   LoadU32(destination, field_operand, r0);
618   add(destination, destination, kRootRegister);
619   RecordComment("]");
620 }
621 
622 void TurboAssembler::DecompressAnyTagged(Register destination,
623                                          Register source) {
624   RecordComment("[ DecompressAnyTagged");
625   ZeroExtWord32(destination, source);
626   add(destination, destination, kRootRegister);
627   RecordComment("]");
628 }
629 
630 void TurboAssembler::LoadTaggedSignedField(Register destination,
631                                            MemOperand field_operand,
632                                            Register scratch) {
633   if (COMPRESS_POINTERS_BOOL) {
634     DecompressTaggedSigned(destination, field_operand);
635   } else {
636     LoadU64(destination, field_operand, scratch);
637   }
638 }
639 
640 void MacroAssembler::RecordWriteField(Register object, int offset,
641                                       Register value, Register slot_address,
642                                       LinkRegisterStatus lr_status,
643                                       SaveFPRegsMode save_fp,
644                                       RememberedSetAction remembered_set_action,
645                                       SmiCheck smi_check) {
646   // First, check if a write barrier is even needed. The tests below
647   // catch stores of Smis.
648   Label done;
649 
650   // Skip barrier if writing a smi.
651   if (smi_check == SmiCheck::kInline) {
652     JumpIfSmi(value, &done);
653   }
654 
655   // Although the object register is tagged, the offset is relative to the start
656   // of the object, so the offset must be a multiple of kTaggedSize.
657   DCHECK(IsAligned(offset, kTaggedSize));
658 
659   AddS64(slot_address, object, Operand(offset - kHeapObjectTag), r0);
660   if (FLAG_debug_code) {
661     Label ok;
662     andi(r0, slot_address, Operand(kTaggedSize - 1));
663     beq(&ok, cr0);
664     stop();
665     bind(&ok);
666   }
667 
668   RecordWrite(object, slot_address, value, lr_status, save_fp,
669               remembered_set_action, SmiCheck::kOmit);
670 
671   bind(&done);
672 
673   // Clobber clobbered input registers when running with the debug-code flag
674   // turned on to provoke errors.
675   if (FLAG_debug_code) {
676     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
677     mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 8)));
678   }
679 }
680 
681 void TurboAssembler::MaybeSaveRegisters(RegList registers) {
682   if (registers.is_empty()) return;
683   MultiPush(registers);
684 }
685 
686 void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
687   if (registers.is_empty()) return;
688   MultiPop(registers);
689 }
690 
691 void TurboAssembler::CallEphemeronKeyBarrier(Register object,
692                                              Register slot_address,
693                                              SaveFPRegsMode fp_mode) {
694   DCHECK(!AreAliased(object, slot_address));
695   RegList registers =
696       WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
697   MaybeSaveRegisters(registers);
698 
699   Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
700   Register slot_address_parameter =
701       WriteBarrierDescriptor::SlotAddressRegister();
702 
703   push(object);
704   push(slot_address);
705   pop(slot_address_parameter);
706   pop(object_parameter);
707 
708   Call(isolate()->builtins()->code_handle(
709            Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
710        RelocInfo::CODE_TARGET);
711   MaybeRestoreRegisters(registers);
712 }
713 
714 void TurboAssembler::CallRecordWriteStubSaveRegisters(
715     Register object, Register slot_address,
716     RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
717     StubCallMode mode) {
718   DCHECK(!AreAliased(object, slot_address));
719   RegList registers =
720       WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
721   MaybeSaveRegisters(registers);
722 
723   Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
724   Register slot_address_parameter =
725       WriteBarrierDescriptor::SlotAddressRegister();
726 
727   push(object);
728   push(slot_address);
729   pop(slot_address_parameter);
730   pop(object_parameter);
731 
732   CallRecordWriteStub(object_parameter, slot_address_parameter,
733                       remembered_set_action, fp_mode, mode);
734 
735   MaybeRestoreRegisters(registers);
736 }
737 
738 void TurboAssembler::CallRecordWriteStub(
739     Register object, Register slot_address,
740     RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
741     StubCallMode mode) {
742   // Use CallRecordWriteStubSaveRegisters if the object and slot registers
743   // need to be caller saved.
744   DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
745   DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
746 #if V8_ENABLE_WEBASSEMBLY
747   if (mode == StubCallMode::kCallWasmRuntimeStub) {
748     // Use {near_call} for direct Wasm call within a module.
749     auto wasm_target =
750         wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
751     Call(wasm_target, RelocInfo::WASM_STUB_CALL);
752 #else
753   if (false) {
754 #endif
755   } else {
756     auto builtin_index =
757         Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
758     if (options().inline_offheap_trampolines) {
759       RecordCommentForOffHeapTrampoline(builtin_index);
760       // Use ip directly instead of using UseScratchRegisterScope, as we do
761       // not preserve scratch registers across calls.
762       mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
763       Call(ip);
764     } else {
765       Handle<Code> code_target =
766           isolate()->builtins()->code_handle(builtin_index);
767       Call(code_target, RelocInfo::CODE_TARGET);
768     }
769   }
770 }
771 
772 // Will clobber 4 registers: object, address, scratch, ip.  The
773 // register 'object' contains a heap object pointer.  The heap object
774 // tag is shifted away.
775 void MacroAssembler::RecordWrite(Register object, Register slot_address,
776                                  Register value, LinkRegisterStatus lr_status,
777                                  SaveFPRegsMode fp_mode,
778                                  RememberedSetAction remembered_set_action,
779                                  SmiCheck smi_check) {
780   DCHECK(!AreAliased(object, value, slot_address));
781   if (FLAG_debug_code) {
782     LoadTaggedPointerField(r0, MemOperand(slot_address));
783     CmpS64(r0, value);
784     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
785   }
786 
787   if ((remembered_set_action == RememberedSetAction::kOmit &&
788        !FLAG_incremental_marking) ||
789       FLAG_disable_write_barriers) {
790     return;
791   }
792 
793   // First, check if a write barrier is even needed. The tests below
794   // catch stores of smis and stores into the young generation.
795   Label done;
796 
797   if (smi_check == SmiCheck::kInline) {
798     JumpIfSmi(value, &done);
799   }
800 
801   CheckPageFlag(value,
802                 value,  // Used as scratch.
803                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
804   CheckPageFlag(object,
805                 value,  // Used as scratch.
806                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
807 
808   // Record the actual write.
809   if (lr_status == kLRHasNotBeenSaved) {
810     mflr(r0);
811     push(r0);
812   }
813   CallRecordWriteStubSaveRegisters(object, slot_address, remembered_set_action,
814                                    fp_mode);
815   if (lr_status == kLRHasNotBeenSaved) {
816     pop(r0);
817     mtlr(r0);
818   }
819 
820   if (FLAG_debug_code) mov(slot_address, Operand(kZapValue));
821 
822   bind(&done);
823 
824   // Clobber clobbered registers when running with the debug-code flag
825   // turned on to provoke errors.
826   if (FLAG_debug_code) {
827     mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
828     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
829   }
830 }
831 
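// PushCommonFrame saves the return address (via r0) and the caller's fp,
// optionally together with the constant pool pointer and a frame-type marker,
// then points fp at the slot holding the saved caller fp.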
832 void TurboAssembler::PushCommonFrame(Register marker_reg) {
833   int fp_delta = 0;
834   mflr(r0);
835   if (FLAG_enable_embedded_constant_pool) {
836     if (marker_reg.is_valid()) {
837       Push(r0, fp, kConstantPoolRegister, marker_reg);
838       fp_delta = 2;
839     } else {
840       Push(r0, fp, kConstantPoolRegister);
841       fp_delta = 1;
842     }
843   } else {
844     if (marker_reg.is_valid()) {
845       Push(r0, fp, marker_reg);
846       fp_delta = 1;
847     } else {
848       Push(r0, fp);
849       fp_delta = 0;
850     }
851   }
852   addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
853 }
854 
855 void TurboAssembler::PushStandardFrame(Register function_reg) {
856   int fp_delta = 0;
857   mflr(r0);
858   if (FLAG_enable_embedded_constant_pool) {
859     if (function_reg.is_valid()) {
860       Push(r0, fp, kConstantPoolRegister, cp, function_reg);
861       fp_delta = 3;
862     } else {
863       Push(r0, fp, kConstantPoolRegister, cp);
864       fp_delta = 2;
865     }
866   } else {
867     if (function_reg.is_valid()) {
868       Push(r0, fp, cp, function_reg);
869       fp_delta = 2;
870     } else {
871       Push(r0, fp, cp);
872       fp_delta = 1;
873     }
874   }
875   addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
876   Push(kJavaScriptCallArgCountRegister);
877 }
878 
879 void TurboAssembler::RestoreFrameStateForTailCall() {
880   if (FLAG_enable_embedded_constant_pool) {
881     LoadU64(kConstantPoolRegister,
882             MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
883     set_constant_pool_available(false);
884   }
885   LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
886   LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
887   mtlr(r0);
888 }
889 
890 void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
891                                      const DoubleRegister src) {
892   // Turn potential sNaN into qNaN.
893   fsub(dst, src, kDoubleRegZero);
894 }
895 
896 void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
897   MovIntToDouble(dst, src, r0);
898   fcfid(dst, dst);
899 }
900 
901 void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
902                                                 DoubleRegister dst) {
903   MovUnsignedIntToDouble(dst, src, r0);
904   fcfid(dst, dst);
905 }
906 
907 void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
908   MovIntToDouble(dst, src, r0);
909   fcfids(dst, dst);
910 }
911 
912 void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
913                                                DoubleRegister dst) {
914   MovUnsignedIntToDouble(dst, src, r0);
915   fcfids(dst, dst);
916 }
917 
918 #if V8_TARGET_ARCH_PPC64
919 void TurboAssembler::ConvertInt64ToDouble(Register src,
920                                           DoubleRegister double_dst) {
921   MovInt64ToDouble(double_dst, src);
922   fcfid(double_dst, double_dst);
923 }
924 
925 void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
926                                                  DoubleRegister double_dst) {
927   MovInt64ToDouble(double_dst, src);
928   fcfidus(double_dst, double_dst);
929 }
930 
931 void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
932                                                   DoubleRegister double_dst) {
933   MovInt64ToDouble(double_dst, src);
934   fcfidu(double_dst, double_dst);
935 }
936 
937 void TurboAssembler::ConvertInt64ToFloat(Register src,
938                                          DoubleRegister double_dst) {
939   MovInt64ToDouble(double_dst, src);
940   fcfids(double_dst, double_dst);
941 }
942 #endif
943 
944 void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
945 #if !V8_TARGET_ARCH_PPC64
946                                           const Register dst_hi,
947 #endif
948                                           const Register dst,
949                                           const DoubleRegister double_dst,
950                                           FPRoundingMode rounding_mode) {
951   if (rounding_mode == kRoundToZero) {
952     fctidz(double_dst, double_input);
953   } else {
954     SetRoundingMode(rounding_mode);
955     fctid(double_dst, double_input);
956     ResetRoundingMode();
957   }
958 
959   MovDoubleToInt64(
960 #if !V8_TARGET_ARCH_PPC64
961       dst_hi,
962 #endif
963       dst, double_dst);
964 }
965 
966 #if V8_TARGET_ARCH_PPC64
967 void TurboAssembler::ConvertDoubleToUnsignedInt64(
968     const DoubleRegister double_input, const Register dst,
969     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
970   if (rounding_mode == kRoundToZero) {
971     fctiduz(double_dst, double_input);
972   } else {
973     SetRoundingMode(rounding_mode);
974     fctidu(double_dst, double_input);
975     ResetRoundingMode();
976   }
977 
978   MovDoubleToInt64(dst, double_dst);
979 }
980 #endif
981 
982 #if !V8_TARGET_ARCH_PPC64
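// The pair shifts below synthesize 64-bit shifts from 32-bit halves. For a
// left shift by s < 32:
//   dst_high = (src_high << s) | (src_low >> (32 - s));
//   dst_low  = src_low << s;
// and for s >= 32 the (masked) shifted low word moves into the high word while
// the low word becomes zero.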
983 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
984                                    Register src_low, Register src_high,
985                                    Register scratch, Register shift) {
986   DCHECK(!AreAliased(dst_low, src_high));
987   DCHECK(!AreAliased(dst_high, src_low));
988   DCHECK(!AreAliased(dst_low, dst_high, shift));
989   Label less_than_32;
990   Label done;
991   cmpi(shift, Operand(32));
992   blt(&less_than_32);
993   // If shift >= 32
994   andi(scratch, shift, Operand(0x1F));
995   ShiftLeftU32(dst_high, src_low, scratch);
996   li(dst_low, Operand::Zero());
997   b(&done);
998   bind(&less_than_32);
999   // If shift < 32
1000   subfic(scratch, shift, Operand(32));
1001   ShiftLeftU32(dst_high, src_high, shift);
1002   srw(scratch, src_low, scratch);
1003   orx(dst_high, dst_high, scratch);
1004   ShiftLeftU32(dst_low, src_low, shift);
1005   bind(&done);
1006 }
1007 
1008 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
1009                                    Register src_low, Register src_high,
1010                                    uint32_t shift) {
1011   DCHECK(!AreAliased(dst_low, src_high));
1012   DCHECK(!AreAliased(dst_high, src_low));
1013   if (shift == 32) {
1014     Move(dst_high, src_low);
1015     li(dst_low, Operand::Zero());
1016   } else if (shift > 32) {
1017     shift &= 0x1F;
1018     ShiftLeftU32(dst_high, src_low, Operand(shift));
1019     li(dst_low, Operand::Zero());
1020   } else if (shift == 0) {
1021     Move(dst_low, src_low);
1022     Move(dst_high, src_high);
1023   } else {
1024     ShiftLeftU32(dst_high, src_high, Operand(shift));
1025     rlwimi(dst_high, src_low, shift, 32 - shift, 31);
1026     ShiftLeftU32(dst_low, src_low, Operand(shift));
1027   }
1028 }
1029 
1030 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
1031                                     Register src_low, Register src_high,
1032                                     Register scratch, Register shift) {
1033   DCHECK(!AreAliased(dst_low, src_high));
1034   DCHECK(!AreAliased(dst_high, src_low));
1035   DCHECK(!AreAliased(dst_low, dst_high, shift));
1036   Label less_than_32;
1037   Label done;
1038   cmpi(shift, Operand(32));
1039   blt(&less_than_32);
1040   // If shift >= 32
1041   andi(scratch, shift, Operand(0x1F));
1042   srw(dst_low, src_high, scratch);
1043   li(dst_high, Operand::Zero());
1044   b(&done);
1045   bind(&less_than_32);
1046   // If shift < 32
1047   subfic(scratch, shift, Operand(32));
1048   srw(dst_low, src_low, shift);
1049   ShiftLeftU32(scratch, src_high, scratch);
1050   orx(dst_low, dst_low, scratch);
1051   srw(dst_high, src_high, shift);
1052   bind(&done);
1053 }
1054 
1055 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
1056                                     Register src_low, Register src_high,
1057                                     uint32_t shift) {
1058   DCHECK(!AreAliased(dst_low, src_high));
1059   DCHECK(!AreAliased(dst_high, src_low));
1060   if (shift == 32) {
1061     Move(dst_low, src_high);
1062     li(dst_high, Operand::Zero());
1063   } else if (shift > 32) {
1064     shift &= 0x1F;
1065     srwi(dst_low, src_high, Operand(shift));
1066     li(dst_high, Operand::Zero());
1067   } else if (shift == 0) {
1068     Move(dst_low, src_low);
1069     Move(dst_high, src_high);
1070   } else {
1071     srwi(dst_low, src_low, Operand(shift));
1072     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
1073     srwi(dst_high, src_high, Operand(shift));
1074   }
1075 }
1076 
1077 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
1078                                        Register src_low, Register src_high,
1079                                        Register scratch, Register shift) {
1080   DCHECK(!AreAliased(dst_low, src_high, shift));
1081   DCHECK(!AreAliased(dst_high, src_low, shift));
1082   Label less_than_32;
1083   Label done;
1084   cmpi(shift, Operand(32));
1085   blt(&less_than_32);
1086   // If shift >= 32
1087   andi(scratch, shift, Operand(0x1F));
1088   sraw(dst_low, src_high, scratch);
1089   srawi(dst_high, src_high, 31);
1090   b(&done);
1091   bind(&less_than_32);
1092   // If shift < 32
1093   subfic(scratch, shift, Operand(32));
1094   srw(dst_low, src_low, shift);
1095   ShiftLeftU32(scratch, src_high, scratch);
1096   orx(dst_low, dst_low, scratch);
1097   sraw(dst_high, src_high, shift);
1098   bind(&done);
1099 }
1100 
1101 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
1102                                        Register src_low, Register src_high,
1103                                        uint32_t shift) {
1104   DCHECK(!AreAliased(dst_low, src_high));
1105   DCHECK(!AreAliased(dst_high, src_low));
1106   if (shift == 32) {
1107     Move(dst_low, src_high);
1108     srawi(dst_high, src_high, 31);
1109   } else if (shift > 32) {
1110     shift &= 0x1F;
1111     srawi(dst_low, src_high, shift);
1112     srawi(dst_high, src_high, 31);
1113   } else if (shift == 0) {
1114     Move(dst_low, src_low);
1115     Move(dst_high, src_high);
1116   } else {
1117     srwi(dst_low, src_low, Operand(shift));
1118     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
1119     srawi(dst_high, src_high, shift);
1120   }
1121 }
1122 #endif
1123 
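// The constant pool address is reconstructed by adding the instruction size
// and the constant-pool offset read from the Code header to the raw code
// target address.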
1124 void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1125     Register code_target_address) {
1126   // Builtins do not use the constant pool (see is_constant_pool_available).
1127   STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
1128 
1129   lwz(r0, MemOperand(code_target_address,
1130                      Code::kInstructionSizeOffset - Code::kHeaderSize));
1131   lwz(kConstantPoolRegister,
1132       MemOperand(code_target_address,
1133                  Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
1134   add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
1135   add(kConstantPoolRegister, kConstantPoolRegister, r0);
1136 }
1137 
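// LoadPC materializes the current program counter: the link-setting branch
// skips to the next instruction (offset 4) and deposits that instruction's
// address in LR, which mflr then copies into dst.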
1138 void TurboAssembler::LoadPC(Register dst) {
1139   b(4, SetLK);
1140   mflr(dst);
1141 }
1142 
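// ComputeCodeStartAddress recovers the start of the current code object by
// taking the address produced by LoadPC and subtracting that instruction's
// offset within the code; LR is preserved in r0 around the LoadPC trick.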
1143 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
1144   mflr(r0);
1145   LoadPC(dst);
1146   subi(dst, dst, Operand(pc_offset() - kInstrSize));
1147   mtlr(r0);
1148 }
1149 
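// Here the constant pool pointer is computed PC-relatively: LoadPC yields the
// address of the current instruction and add_label_offset adjusts it by the
// distance to the constant pool position.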
1150 void TurboAssembler::LoadConstantPoolPointerRegister() {
1151   //
1152   // Builtins do not use the constant pool (see is_constant_pool_available).
1153   STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
1154 
1155   LoadPC(kConstantPoolRegister);
1156   int32_t delta = -pc_offset() + 4;
1157   add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
1158                    ConstantPoolPosition(), delta);
1159 }
1160 
1161 void TurboAssembler::StubPrologue(StackFrame::Type type) {
1162   {
1163     ConstantPoolUnavailableScope constant_pool_unavailable(this);
1164     mov(r11, Operand(StackFrame::TypeToMarker(type)));
1165     PushCommonFrame(r11);
1166   }
1167   if (FLAG_enable_embedded_constant_pool) {
1168     LoadConstantPoolPointerRegister();
1169     set_constant_pool_available(true);
1170   }
1171 }
1172 
1173 void TurboAssembler::Prologue() {
1174   PushStandardFrame(r4);
1175   if (FLAG_enable_embedded_constant_pool) {
1176     // base contains prologue address
1177     LoadConstantPoolPointerRegister();
1178     set_constant_pool_available(true);
1179   }
1180 }
1181 
1182 void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
1183                                    ArgumentsCountMode mode) {
1184   int receiver_bytes =
1185       (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
1186   switch (type) {
1187     case kCountIsInteger: {
1188       ShiftLeftU64(ip, count, Operand(kSystemPointerSizeLog2));
1189       add(sp, sp, ip);
1190       break;
1191     }
1192     case kCountIsSmi: {
1193       STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
1194       SmiToPtrArrayOffset(count, count);
1195       add(sp, sp, count);
1196       break;
1197     }
1198     case kCountIsBytes: {
1199       add(sp, sp, count);
1200       break;
1201     }
1202   }
1203   if (receiver_bytes != 0) {
1204     addi(sp, sp, Operand(receiver_bytes));
1205   }
1206 }
1207 
1208 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
1209                                                      Register receiver,
1210                                                      ArgumentsCountType type,
1211                                                      ArgumentsCountMode mode) {
1212   DCHECK(!AreAliased(argc, receiver));
1213   if (mode == kCountExcludesReceiver) {
1214     // Drop arguments without receiver and override old receiver.
1215     DropArguments(argc, type, kCountIncludesReceiver);
1216     StoreU64(receiver, MemOperand(sp));
1217   } else {
1218     DropArguments(argc, type, mode);
1219     push(receiver);
1220   }
1221 }
1222 
1223 void TurboAssembler::EnterFrame(StackFrame::Type type,
1224                                 bool load_constant_pool_pointer_reg) {
1225   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
1226     // Push type explicitly so we can leverage the constant pool.
1227     // This path cannot rely on ip containing code entry.
1228     PushCommonFrame();
1229     LoadConstantPoolPointerRegister();
1230     mov(ip, Operand(StackFrame::TypeToMarker(type)));
1231     push(ip);
1232   } else {
1233     mov(ip, Operand(StackFrame::TypeToMarker(type)));
1234     PushCommonFrame(ip);
1235   }
1236 #if V8_ENABLE_WEBASSEMBLY
1237   if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
1238 #endif  // V8_ENABLE_WEBASSEMBLY
1239 }
1240 
1241 int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1242   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1243   // r3: preserved
1244   // r4: preserved
1245   // r5: preserved
1246 
1247   // Drop the execution stack down to the frame pointer and restore
1248   // the caller's state.
1249   int frame_ends;
1250   LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1251   LoadU64(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1252   if (FLAG_enable_embedded_constant_pool) {
1253     LoadU64(kConstantPoolRegister,
1254             MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
1255   }
1256   mtlr(r0);
1257   frame_ends = pc_offset();
1258   AddS64(sp, fp,
1259          Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment),
1260          r0);
1261   mr(fp, ip);
1262   return frame_ends;
1263 }
1264 
1265 // ExitFrame layout (probably wrongish.. needs updating)
1266 //
1267 //  SP -> previousSP
1268 //        LK reserved
1269 //        sp_on_exit (for debug?)
1270 // oldSP->prev SP
1271 //        LK
1272 //        <parameters on stack>
1273 
1274 // Prior to calling EnterExitFrame, we have a bunch of parameters
1275 // on the stack that we need to wrap a real frame around, so first
1276 // we reserve a slot for LK and push the previous SP, which is captured
1277 // in the fp register (r31).
1278 // Then we allocate a new frame.
1279 
1280 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1281                                     StackFrame::Type frame_type) {
1282   DCHECK(frame_type == StackFrame::EXIT ||
1283          frame_type == StackFrame::BUILTIN_EXIT);
1284   // Set up the frame structure on the stack.
1285   DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1286   DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
1287   DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
1288   DCHECK_GT(stack_space, 0);
1289 
1290   // This is an opportunity to build a frame to wrap
1291   // all of the pushes that have happened inside of V8
1292   // since we were called from C code
1293 
1294   mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1295   PushCommonFrame(ip);
1296   // Reserve room for saved entry sp.
1297   subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1298 
1299   if (FLAG_debug_code) {
1300     li(r8, Operand::Zero());
1301     StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1302   }
1303   if (FLAG_enable_embedded_constant_pool) {
1304     StoreU64(kConstantPoolRegister,
1305              MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1306   }
1307 
1308   // Save the frame pointer and the context in top.
1309   Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1310                                      isolate()));
1311   StoreU64(fp, MemOperand(r8));
1312   Move(r8,
1313        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1314   StoreU64(cp, MemOperand(r8));
1315 
1316   // Optionally save all volatile double registers.
1317   if (save_doubles) {
1318     MultiPushDoubles(kCallerSavedDoubles);
1319     // Note that d0 will be accessible at
1320     //   fp - ExitFrameConstants::kFrameSize -
1321     //   kNumCallerSavedDoubles * kDoubleSize,
1322     // since the sp slot and code slot were pushed after the fp.
1323   }
1324 
1325   AddS64(sp, sp, Operand(-stack_space * kSystemPointerSize));
1326 
1327   // Allocate and align the frame preparing for calling the runtime
1328   // function.
1329   const int frame_alignment = ActivationFrameAlignment();
1330   if (frame_alignment > kSystemPointerSize) {
1331     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1332     ClearRightImm(sp, sp,
1333                   Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
1334   }
1335   li(r0, Operand::Zero());
1336   StoreU64WithUpdate(
1337       r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
1338 
1339   // Set the exit frame sp value to point just before the return address
1340   // location.
1341   AddS64(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize),
1342          r0);
1343   StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1344 }
1345 
1346 int TurboAssembler::ActivationFrameAlignment() {
1347 #if !defined(USE_SIMULATOR)
1348   // Running on the real platform. Use the alignment as mandated by the local
1349   // environment.
1350   // Note: This will break if we ever start generating snapshots on one PPC
1351   // platform for another PPC platform with a different alignment.
1352   return base::OS::ActivationFrameAlignment();
1353 #else  // Simulated
1354   // If we are using the simulator then we should always align to the expected
1355   // alignment. As the simulator is used to generate snapshots we do not know
1356   // if the target platform will need alignment, so this is controlled from a
1357   // flag.
1358   return FLAG_sim_stack_alignment;
1359 #endif
1360 }
1361 
1362 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1363                                     bool argument_count_is_length) {
1364   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1365   // Optionally restore all double registers.
1366   if (save_doubles) {
1367     // Calculate the stack location of the saved doubles and restore them.
1368     const int kNumRegs = kNumCallerSavedDoubles;
1369     const int offset =
1370         (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
1371     AddS64(r6, fp, Operand(-offset), r0);
1372     MultiPopDoubles(kCallerSavedDoubles, r6);
1373   }
1374 
1375   // Clear top frame.
1376   li(r6, Operand::Zero());
1377   Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1378                                      isolate()));
1379   StoreU64(r6, MemOperand(ip));
1380 
1381   // Restore current context from top and clear it in debug mode.
1382   Move(ip,
1383        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1384   LoadU64(cp, MemOperand(ip));
1385 
1386 #ifdef DEBUG
1387   mov(r6, Operand(Context::kInvalidContext));
1388   Move(ip,
1389        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1390   StoreU64(r6, MemOperand(ip));
1391 #endif
1392 
1393   // Tear down the exit frame, pop the arguments, and return.
1394   LeaveFrame(StackFrame::EXIT);
1395 
1396   if (argument_count.is_valid()) {
1397     if (!argument_count_is_length) {
1398       ShiftLeftU64(argument_count, argument_count,
1399                    Operand(kSystemPointerSizeLog2));
1400     }
1401     add(sp, sp, argument_count);
1402   }
1403 }
1404 
1405 void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
1406   Move(dst, d1);
1407 }
1408 
1409 void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1410   Move(dst, d1);
1411 }
1412 
1413 void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
1414   DCHECK(root_array_available());
1415   Isolate* isolate = this->isolate();
1416   ExternalReference limit =
1417       kind == StackLimitKind::kRealStackLimit
1418           ? ExternalReference::address_of_real_jslimit(isolate)
1419           : ExternalReference::address_of_jslimit(isolate);
1420   DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
1421 
1422   intptr_t offset =
1423       TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
1424   CHECK(is_int32(offset));
1425   LoadU64(destination, MemOperand(kRootRegister, offset), r0);
1426 }
1427 
1428 void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1429                                         Label* stack_overflow) {
1430   // Check the stack for overflow. We are not trying to catch
1431   // interruptions (e.g. debug break and preemption) here, so the "real stack
1432   // limit" is checked.
1433   LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
1434   // Make scratch the space we have left. The stack might already be overflowed
1435   // here, which will cause scratch to become negative.
1436   sub(scratch, sp, scratch);
1437   // Check if the arguments will overflow the stack.
1438   ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
1439   CmpS64(scratch, r0);
1440   ble(stack_overflow);  // Signed comparison.
1441 }
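// Illustrative sketch (not part of the original source): the same check in
// plain C++, assuming 64-bit pointers (one 8-byte slot per argument). The
// function and parameter names are hypothetical stand-ins for the registers
// used above.
static bool WouldOverflowStack(uintptr_t sp, uintptr_t real_limit,
                               intptr_t num_args) {
  // Space we have left; may already be negative if the stack has overflowed.
  intptr_t remaining = static_cast<intptr_t>(sp - real_limit);
  // Space the new arguments would occupy, in bytes.
  intptr_t needed = num_args * 8;
  return remaining <= needed;  // Signed comparison, mirroring the ble() above.
}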
1442 
1443 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1444                                     Register actual_parameter_count,
1445                                     Label* done, InvokeType type) {
1446   Label regular_invoke;
1447 
1448   //  r3: actual arguments count
1449   //  r4: function (passed through to callee)
1450   //  r5: expected arguments count
1451 
1452   DCHECK_EQ(actual_parameter_count, r3);
1453   DCHECK_EQ(expected_parameter_count, r5);
1454 
1455   // If the expected parameter count is equal to the adaptor sentinel, no need
1456   // to push undefined values as arguments.
1457   if (kDontAdaptArgumentsSentinel != 0) {
1458     mov(r0, Operand(kDontAdaptArgumentsSentinel));
1459     CmpS64(expected_parameter_count, r0);
1460     beq(&regular_invoke);
1461   }
1462 
1463   // If overapplication or if the actual argument count is equal to the
1464   // formal parameter count, no need to push extra undefined values.
1465   sub(expected_parameter_count, expected_parameter_count,
1466       actual_parameter_count, LeaveOE, SetRC);
1467   ble(&regular_invoke, cr0);
1468 
1469   Label stack_overflow;
1470   Register scratch = r7;
1471   StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1472 
1473   // Underapplication. Move the arguments already in the stack, including the
1474   // receiver and the return address.
1475   {
1476     Label copy, skip;
1477     Register src = r9, dest = r8;
1478     addi(src, sp, Operand(-kSystemPointerSize));
1479     ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
1480     sub(sp, sp, r0);  // Update the stack pointer.
1481     // Point dest one slot below the new sp; the stores below pre-increment.
1482     addi(dest, sp, Operand(-kSystemPointerSize));
1483     mr(r0, actual_parameter_count);
1484     cmpi(r0, Operand::Zero());
1485     ble(&skip);
1486     mtctr(r0);
1487 
1488     bind(&copy);
1489     LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize));
1490     StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
1491     bdnz(&copy);
1492     bind(&skip);
1493   }
1494 
1495   // Fill remaining expected arguments with undefined values.
1496   LoadRoot(scratch, RootIndex::kUndefinedValue);
1497   {
1498     mtctr(expected_parameter_count);
1499 
1500     Label loop;
1501     bind(&loop);
1502     StoreU64WithUpdate(scratch, MemOperand(r8, kSystemPointerSize));
1503     bdnz(&loop);
1504   }
1505   b(&regular_invoke);
1506 
1507   bind(&stack_overflow);
1508   {
1509     FrameScope frame(
1510         this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1511     CallRuntime(Runtime::kThrowStackOverflow);
1512     bkpt(0);
1513   }
1514 
1515   bind(&regular_invoke);
1516 }
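// Illustrative sketch (not part of the original source): the net effect of the
// underapplication path above, modelled on plain arrays. "kUndefined" is a
// hypothetical stand-in for the undefined root value.
static void AdaptArguments(const intptr_t* actual_args, int actual_count,
                           intptr_t* adapted_args, int expected_count,
                           intptr_t kUndefined) {
  // Keep the arguments (and receiver) that were actually passed...
  for (int i = 0; i < actual_count; i++) adapted_args[i] = actual_args[i];
  // ...and pad the remaining formal parameters with undefined.
  for (int i = actual_count; i < expected_count; i++) {
    adapted_args[i] = kUndefined;
  }
}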
1517 
1518 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1519                                     Register expected_parameter_count,
1520                                     Register actual_parameter_count) {
1521   Label skip_hook;
1522 
1523   ExternalReference debug_hook_active =
1524       ExternalReference::debug_hook_on_function_call_address(isolate());
1525   Move(r7, debug_hook_active);
1526   LoadU8(r7, MemOperand(r7), r0);
1527   extsb(r7, r7);
1528   CmpSmiLiteral(r7, Smi::zero(), r0);
1529   beq(&skip_hook);
1530 
1531   {
1532     // Load receiver to pass it later to DebugOnFunctionCall hook.
1533     LoadReceiver(r7, actual_parameter_count);
1534     FrameScope frame(
1535         this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1536 
1537     SmiTag(expected_parameter_count);
1538     Push(expected_parameter_count);
1539 
1540     SmiTag(actual_parameter_count);
1541     Push(actual_parameter_count);
1542 
1543     if (new_target.is_valid()) {
1544       Push(new_target);
1545     }
1546     Push(fun, fun, r7);
1547     CallRuntime(Runtime::kDebugOnFunctionCall);
1548     Pop(fun);
1549     if (new_target.is_valid()) {
1550       Pop(new_target);
1551     }
1552 
1553     Pop(actual_parameter_count);
1554     SmiUntag(actual_parameter_count);
1555 
1556     Pop(expected_parameter_count);
1557     SmiUntag(expected_parameter_count);
1558   }
1559   bind(&skip_hook);
1560 }
1561 
1562 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1563                                         Register expected_parameter_count,
1564                                         Register actual_parameter_count,
1565                                         InvokeType type) {
1566   // You can't call a function without a valid frame.
1567   DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1568   DCHECK_EQ(function, r4);
1569   DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1570 
1571   // On function call, call into the debugger if necessary.
1572   CheckDebugHook(function, new_target, expected_parameter_count,
1573                  actual_parameter_count);
1574 
1575   // Clear the new.target register if not given.
1576   if (!new_target.is_valid()) {
1577     LoadRoot(r6, RootIndex::kUndefinedValue);
1578   }
1579 
1580   Label done;
1581   InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
1582   // We call indirectly through the code field in the function to
1583   // allow recompilation to take effect without changing any of the
1584   // call sites.
1585   Register code = kJavaScriptCallCodeStartRegister;
1586   LoadTaggedPointerField(
1587       code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
1588   switch (type) {
1589     case InvokeType::kCall:
1590       CallCodeObject(code);
1591       break;
1592     case InvokeType::kJump:
1593       JumpCodeObject(code);
1594       break;
1595   }
1596 
1597   // Continue here if InvokePrologue does handle the invocation due to
1598   // mismatched parameter counts.
1599   bind(&done);
1600 }
1601 
1602 void MacroAssembler::InvokeFunctionWithNewTarget(
1603     Register fun, Register new_target, Register actual_parameter_count,
1604     InvokeType type) {
1605   // You can't call a function without a valid frame.
1606   DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1607 
1608   // Contract with called JS functions requires that function is passed in r4.
1609   DCHECK_EQ(fun, r4);
1610 
1611   Register expected_reg = r5;
1612   Register temp_reg = r7;
1613 
1614   LoadTaggedPointerField(
1615       temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
1616   LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
1617                          r0);
1618   LoadU16(expected_reg,
1619           FieldMemOperand(temp_reg,
1620                           SharedFunctionInfo::kFormalParameterCountOffset));
1621 
1622   InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1623                      type);
1624 }
1625 
1626 void MacroAssembler::InvokeFunction(Register function,
1627                                     Register expected_parameter_count,
1628                                     Register actual_parameter_count,
1629                                     InvokeType type) {
1630   // You can't call a function without a valid frame.
1631   DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1632 
1633   // Contract with called JS functions requires that function is passed in r4.
1634   DCHECK_EQ(function, r4);
1635 
1636   // Get the function and setup the context.
1637   LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
1638                          r0);
1639 
1640   InvokeFunctionCode(r4, no_reg, expected_parameter_count,
1641                      actual_parameter_count, type);
1642 }
1643 
1644 void MacroAssembler::PushStackHandler() {
1645   // Adjust this code if the stack handler layout asserted below changes.
1646   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1647   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
1648 
1649   Push(Smi::zero());  // Padding.
1650 
1651   // Link the current handler as the next handler.
1652   // Preserve r4-r8.
1653   Move(r3,
1654        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1655   LoadU64(r0, MemOperand(r3));
1656   push(r0);
1657 
1658   // Set this new handler as the current one.
1659   StoreU64(sp, MemOperand(r3));
1660 }
1661 
1662 void MacroAssembler::PopStackHandler() {
1663   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1664   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1665 
1666   pop(r4);
1667   Move(ip,
1668        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1669   StoreU64(r4, MemOperand(ip));
1670 
1671   Drop(1);  // Drop padding.
1672 }
1673 
1674 void MacroAssembler::CompareObjectType(Register object, Register map,
1675                                        Register type_reg, InstanceType type) {
1676   const Register temp = type_reg == no_reg ? r0 : type_reg;
1677 
1678   LoadMap(map, object);
1679   CompareInstanceType(map, temp, type);
1680 }
1681 
1682 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1683                                          InstanceType type) {
1684   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1685   STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1686   lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1687   cmpi(type_reg, Operand(type));
1688 }
1689 
1690 void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
1691                                   unsigned higher_limit) {
1692   ASM_CODE_COMMENT(this);
1693   DCHECK_LT(lower_limit, higher_limit);
1694   UseScratchRegisterScope temps(this);
1695   Register scratch = temps.Acquire();
1696   if (lower_limit != 0) {
1697     mov(scratch, Operand(lower_limit));
1698     sub(scratch, value, scratch);
1699     cmpli(scratch, Operand(higher_limit - lower_limit));
1700   } else {
1701     mov(scratch, Operand(higher_limit));
1702     CmpU64(value, scratch);
1703   }
1704 }
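// Illustrative sketch (not part of the original source): the unsigned-subtract
// trick used above, in plain C++. One unsigned comparison covers both bounds,
// because values below lower_limit wrap around to large unsigned numbers.
static bool IsInRange(unsigned value, unsigned lower_limit,
                      unsigned higher_limit) {
  return (value - lower_limit) <= (higher_limit - lower_limit);
}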
1705 
1706 void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
1707                                               InstanceType lower_limit,
1708                                               InstanceType higher_limit) {
1709   DCHECK_LT(lower_limit, higher_limit);
1710   LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1711   CompareRange(type_reg, lower_limit, higher_limit);
1712 }
1713 
1714 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1715   DCHECK(obj != r0);
1716   LoadRoot(r0, index);
1717   CmpS64(obj, r0);
1718 }
1719 
1720 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1721                                             Register right,
1722                                             Register overflow_dst,
1723                                             Register scratch) {
1724   DCHECK(dst != overflow_dst);
1725   DCHECK(dst != scratch);
1726   DCHECK(overflow_dst != scratch);
1727   DCHECK(overflow_dst != left);
1728   DCHECK(overflow_dst != right);
1729 
1730   bool left_is_right = left == right;
1731   RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1732 
1733   // C = A+B; C overflows if A, B have the same sign but C's sign differs.
1734   if (dst == left) {
1735     mr(scratch, left);                        // Preserve left.
1736     add(dst, left, right);                    // Left is overwritten.
1737     xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
1738     if (!left_is_right) xor_(scratch, dst, right);
1739   } else if (dst == right) {
1740     mr(scratch, right);     // Preserve right.
1741     add(dst, left, right);  // Right is overwritten.
1742     xor_(overflow_dst, dst, left, xorRC);
1743     if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
1744   } else {
1745     add(dst, left, right);
1746     xor_(overflow_dst, dst, left, xorRC);
1747     if (!left_is_right) xor_(scratch, dst, right);
1748   }
1749   if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1750 }
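// Illustrative sketch (not part of the original source): the sign-bit test
// implemented above, in plain C++. The sum overflows iff the operands have the
// same sign and the result's sign differs from it, i.e. exactly when
// ((sum ^ left) & (sum ^ right)) is negative.
static bool AddOverflowed(int64_t left, int64_t right) {
  // Do the addition in unsigned arithmetic, which is well defined on overflow.
  int64_t sum = static_cast<int64_t>(static_cast<uint64_t>(left) +
                                     static_cast<uint64_t>(right));
  return ((sum ^ left) & (sum ^ right)) < 0;
}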
1751 
1752 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1753                                             intptr_t right,
1754                                             Register overflow_dst,
1755                                             Register scratch) {
1756   Register original_left = left;
1757   DCHECK(dst != overflow_dst);
1758   DCHECK(dst != scratch);
1759   DCHECK(overflow_dst != scratch);
1760   DCHECK(overflow_dst != left);
1761 
1762   // C = A+B; C overflows if A, B have the same sign but C's sign differs.
1763   if (dst == left) {
1764     // Preserve left.
1765     original_left = overflow_dst;
1766     mr(original_left, left);
1767   }
1768   AddS64(dst, left, Operand(right), scratch);
1769   xor_(overflow_dst, dst, original_left);
1770   if (right >= 0) {
1771     and_(overflow_dst, overflow_dst, dst, SetRC);
1772   } else {
1773     andc(overflow_dst, overflow_dst, dst, SetRC);
1774   }
1775 }
1776 
1777 void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
1778                                             Register right,
1779                                             Register overflow_dst,
1780                                             Register scratch) {
1781   DCHECK(dst != overflow_dst);
1782   DCHECK(dst != scratch);
1783   DCHECK(overflow_dst != scratch);
1784   DCHECK(overflow_dst != left);
1785   DCHECK(overflow_dst != right);
1786 
1787   // C = A-B; C overflows if A, B have different signs and C's sign differs.
1788   if (dst == left) {
1789     mr(scratch, left);      // Preserve left.
1790     sub(dst, left, right);  // Left is overwritten.
1791     xor_(overflow_dst, dst, scratch);
1792     xor_(scratch, scratch, right);
1793     and_(overflow_dst, overflow_dst, scratch, SetRC);
1794   } else if (dst == right) {
1795     mr(scratch, right);     // Preserve right.
1796     sub(dst, left, right);  // Right is overwritten.
1797     xor_(overflow_dst, dst, left);
1798     xor_(scratch, left, scratch);
1799     and_(overflow_dst, overflow_dst, scratch, SetRC);
1800   } else {
1801     sub(dst, left, right);
1802     xor_(overflow_dst, dst, left);
1803     xor_(scratch, left, right);
1804     and_(overflow_dst, scratch, overflow_dst, SetRC);
1805   }
1806 }
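// Illustrative sketch (not part of the original source): the corresponding
// test for subtraction. The difference overflows iff the operands have
// different signs and the result's sign differs from the minuend's.
static bool SubOverflowed(int64_t left, int64_t right) {
  int64_t diff = static_cast<int64_t>(static_cast<uint64_t>(left) -
                                      static_cast<uint64_t>(right));
  return ((diff ^ left) & (left ^ right)) < 0;
}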
1807 
1808 void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
1809                             DoubleRegister rhs, DoubleRegister scratch) {
1810   Label check_zero, return_left, return_right, return_nan, done;
1811   fcmpu(lhs, rhs);
1812   bunordered(&return_nan);
1813   if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
1814     xsmindp(dst, lhs, rhs);
1815     b(&done);
1816   }
1817   beq(&check_zero);
1818   ble(&return_left);
1819   b(&return_right);
1820 
1821   bind(&check_zero);
1822   fcmpu(lhs, kDoubleRegZero);
1823   /* left == right != 0. */
1824   bne(&return_left);
1825   /* At this point, both left and right are either 0 or -0. */
1826   /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */
1827   /* being different registers is most efficiently expressed */
1828   /* as -((-L) - R). */
1829   fneg(scratch, lhs);
1830   if (scratch == rhs) {
1831     fadd(dst, scratch, rhs);
1832   } else {
1833     fsub(dst, scratch, rhs);
1834   }
1835   fneg(dst, dst);
1836   b(&done);
1837 
1838   bind(&return_nan);
1839   /* If left or right is NaN, fadd propagates the appropriate one. */
1840   fadd(dst, lhs, rhs);
1841   b(&done);
1842 
1843   bind(&return_right);
1844   if (rhs != dst) {
1845     fmr(dst, rhs);
1846   }
1847   b(&done);
1848 
1849   bind(&return_left);
1850   if (lhs != dst) {
1851     fmr(dst, lhs);
1852   }
1853   bind(&done);
1854 }
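// Illustrative sketch (not part of the original source): why -((-L) + (-R))
// yields the IEEE minimum once both inputs are known to be +/-0.0:
//   L = -0.0, R = +0.0:  (+0.0) + (-0.0) = +0.0, negated -> -0.0
//   L = +0.0, R = +0.0:  (-0.0) + (-0.0) = -0.0, negated -> +0.0
// i.e. the result is -0.0 exactly when at least one input is -0.0.
static double MinOfSignedZeros(double lhs, double rhs) {
  // Only meaningful when lhs and rhs are both zeros of either sign.
  return -((-lhs) + (-rhs));
}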
1855 
1856 void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
1857                             DoubleRegister rhs, DoubleRegister scratch) {
1858   Label check_zero, return_left, return_right, return_nan, done;
1859   fcmpu(lhs, rhs);
1860   bunordered(&return_nan);
1861   if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
1862     xsmaxdp(dst, lhs, rhs);
1863     b(&done);
1864   }
1865   beq(&check_zero);
1866   bge(&return_left);
1867   b(&return_right);
1868 
1869   bind(&check_zero);
1870   fcmpu(lhs, kDoubleRegZero);
1871   /* left == right != 0. */
1872   bne(&return_left);
1873   /* At this point, both left and right are either 0 or -0. */
1874   fadd(dst, lhs, rhs);
1875   b(&done);
1876 
1877   bind(&return_nan);
1878   /* If left or right is NaN, fadd propagates the appropriate one. */
1879   fadd(dst, lhs, rhs);
1880   b(&done);
1881 
1882   bind(&return_right);
1883   if (rhs != dst) {
1884     fmr(dst, rhs);
1885   }
1886   b(&done);
1887 
1888   bind(&return_left);
1889   if (lhs != dst) {
1890     fmr(dst, lhs);
1891   }
1892   bind(&done);
1893 }
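// Illustrative note (not part of the original source): in the signed-zero case
// above a plain fadd already produces the IEEE maximum, since
// (+0.0) + (-0.0) == +0.0 and (-0.0) + (-0.0) == -0.0.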
1894 
1895 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1896                                      unsigned higher_limit,
1897                                      Label* on_in_range) {
1898   CompareRange(value, lower_limit, higher_limit);
1899   ble(on_in_range);
1900 }
1901 
1902 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1903                                        Register result,
1904                                        DoubleRegister double_input,
1905                                        StubCallMode stub_mode) {
1906   Label done;
1907 
1908   TryInlineTruncateDoubleToI(result, double_input, &done);
1909 
1910   // If we fell through then inline version didn't succeed - call stub instead.
1911   mflr(r0);
1912   push(r0);
1913   // Put input on stack.
1914   stfdu(double_input, MemOperand(sp, -kDoubleSize));
1915 
1916 #if V8_ENABLE_WEBASSEMBLY
1917   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1918     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1919 #else
1920   // For balance.
1921   if (false) {
1922 #endif  // V8_ENABLE_WEBASSEMBLY
1923   } else {
1924     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1925   }
1926 
1927   LoadU64(result, MemOperand(sp));
1928   addi(sp, sp, Operand(kDoubleSize));
1929   pop(r0);
1930   mtlr(r0);
1931 
1932   bind(&done);
1933 }
1934 
1935 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1936                                                 DoubleRegister double_input,
1937                                                 Label* done) {
1938   DoubleRegister double_scratch = kScratchDoubleReg;
1939 #if !V8_TARGET_ARCH_PPC64
1940   Register scratch = ip;
1941 #endif
1942 
1943   ConvertDoubleToInt64(double_input,
1944 #if !V8_TARGET_ARCH_PPC64
1945                        scratch,
1946 #endif
1947                        result, double_scratch);
1948 
1949 // Test for overflow
1950 #if V8_TARGET_ARCH_PPC64
1951   TestIfInt32(result, r0);
1952 #else
1953   TestIfInt32(scratch, result, r0);
1954 #endif
1955   beq(done);
1956 }
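// Illustrative sketch (not part of the original source): the idea behind the
// TestIfInt32 overflow check - the converted value fits in 32 bits iff
// sign-extending its low 32 bits reproduces the full 64-bit value.
static bool FitsInt32(int64_t value) {
  return value == static_cast<int32_t>(value);
}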
1957 
1958 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1959                                  SaveFPRegsMode save_doubles) {
1960   // All parameters are on the stack.  r3 has the return value after call.
1961 
1962   // If the expected number of arguments of the runtime function is
1963   // constant, we check that the actual number of arguments match the
1964   // expectation.
1965   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1966 
1967   // TODO(1236192): Most runtime routines don't need the number of
1968   // arguments passed in because it is constant. At some point we
1969   // should remove this need and make the runtime routine entry code
1970   // smarter.
1971   mov(r3, Operand(num_arguments));
1972   Move(r4, ExternalReference::Create(f));
1973 #if V8_TARGET_ARCH_PPC64
1974   Handle<Code> code =
1975       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1976 #else
1977   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1978 #endif
1979   Call(code, RelocInfo::CODE_TARGET);
1980 }
1981 
1982 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1983   const Runtime::Function* function = Runtime::FunctionForId(fid);
1984   DCHECK_EQ(1, function->result_size);
1985   if (function->nargs >= 0) {
1986     mov(r3, Operand(function->nargs));
1987   }
1988   JumpToExternalReference(ExternalReference::Create(fid));
1989 }
1990 
1991 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1992                                              bool builtin_exit_frame) {
1993   Move(r4, builtin);
1994   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
1995                                           ArgvMode::kStack, builtin_exit_frame);
1996   Jump(code, RelocInfo::CODE_TARGET);
1997 }
1998 
1999 void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
2000   mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
2001   Jump(kOffHeapTrampolineRegister);
2002 }
2003 
2004 void MacroAssembler::LoadWeakValue(Register out, Register in,
2005                                    Label* target_if_cleared) {
2006   CmpS32(in, Operand(kClearedWeakHeapObjectLower32), r0);
2007   beq(target_if_cleared);
2008 
2009   mov(r0, Operand(~kWeakHeapObjectMask));
2010   and_(out, in, r0);
2011 }
2012 
2013 void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
2014                                           Register scratch1,
2015                                           Register scratch2) {
2016   DCHECK_GT(value, 0);
2017   if (FLAG_native_code_counters && counter->Enabled()) {
2018     // This operation has to be exactly 32-bit wide in case the external
2019     // reference table redirects the counter to a uint32_t dummy_stats_counter_
2020     // field.
2021     Move(scratch2, ExternalReference::Create(counter));
2022     lwz(scratch1, MemOperand(scratch2));
2023     addi(scratch1, scratch1, Operand(value));
2024     stw(scratch1, MemOperand(scratch2));
2025   }
2026 }
2027 
2028 void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
2029                                           Register scratch1,
2030                                           Register scratch2) {
2031   DCHECK_GT(value, 0);
2032   if (FLAG_native_code_counters && counter->Enabled()) {
2033     // This operation has to be exactly 32-bit wide in case the external
2034     // reference table redirects the counter to a uint32_t dummy_stats_counter_
2035     // field.
2036     Move(scratch2, ExternalReference::Create(counter));
2037     lwz(scratch1, MemOperand(scratch2));
2038     subi(scratch1, scratch1, Operand(value));
2039     stw(scratch1, MemOperand(scratch2));
2040   }
2041 }
2042 
2043 void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
2044   if (FLAG_debug_code) Check(cond, reason, cr);
2045 }
2046 
2047 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
2048   Label L;
2049   b(cond, &L, cr);
2050   Abort(reason);
2051   // will not return here
2052   bind(&L);
2053 }
2054 
2055 void TurboAssembler::Abort(AbortReason reason) {
2056   Label abort_start;
2057   bind(&abort_start);
2058   if (FLAG_code_comments) {
2059     const char* msg = GetAbortReason(reason);
2060     RecordComment("Abort message: ");
2061     RecordComment(msg);
2062   }
2063 
2064   // Avoid emitting call to builtin if requested.
2065   if (trap_on_abort()) {
2066     stop();
2067     return;
2068   }
2069 
2070   if (should_abort_hard()) {
2071     // We don't care if we constructed a frame. Just pretend we did.
2072     FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
2073     mov(r3, Operand(static_cast<int>(reason)));
2074     PrepareCallCFunction(1, r4);
2075     CallCFunction(ExternalReference::abort_with_reason(), 1);
2076     return;
2077   }
2078 
2079   LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
2080 
2081   // Disable stub call restrictions to always allow calls to abort.
2082   if (!has_frame_) {
2083     // We don't actually want to generate a pile of code for this, so just
2084     // claim there is a stack frame, without generating one.
2085     FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
2086     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2087   } else {
2088     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2089   }
2090   // will not return here
2091 }
2092 
2093 void TurboAssembler::LoadMap(Register destination, Register object) {
2094   LoadTaggedPointerField(destination,
2095                          FieldMemOperand(object, HeapObject::kMapOffset), r0);
2096 }
2097 
2098 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2099   LoadMap(dst, cp);
2100   LoadTaggedPointerField(
2101       dst,
2102       FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
2103       r0);
2104   LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
2105 }
2106 
2107 void TurboAssembler::AssertNotSmi(Register object) {
2108   if (FLAG_debug_code) {
2109     STATIC_ASSERT(kSmiTag == 0);
2110     TestIfSmi(object, r0);
2111     Check(ne, AbortReason::kOperandIsASmi, cr0);
2112   }
2113 }
2114 
2115 void TurboAssembler::AssertSmi(Register object) {
2116   if (FLAG_debug_code) {
2117     STATIC_ASSERT(kSmiTag == 0);
2118     TestIfSmi(object, r0);
2119     Check(eq, AbortReason::kOperandIsNotASmi, cr0);
2120   }
2121 }
2122 
2123 void MacroAssembler::AssertConstructor(Register object) {
2124   if (FLAG_debug_code) {
2125     STATIC_ASSERT(kSmiTag == 0);
2126     TestIfSmi(object, r0);
2127     Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
2128     push(object);
2129     LoadMap(object, object);
2130     lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
2131     andi(object, object, Operand(Map::Bits1::IsConstructorBit::kMask));
2132     pop(object);
2133     Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
2134   }
2135 }
2136 
2137 void MacroAssembler::AssertFunction(Register object) {
2138   if (FLAG_debug_code) {
2139     STATIC_ASSERT(kSmiTag == 0);
2140     TestIfSmi(object, r0);
2141     Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
2142     push(object);
2143     LoadMap(object, object);
2144     CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
2145                              LAST_JS_FUNCTION_TYPE);
2146     pop(object);
2147     Check(le, AbortReason::kOperandIsNotAFunction);
2148   }
2149 }
2150 
2151 void MacroAssembler::AssertCallableFunction(Register object) {
2152   if (!FLAG_debug_code) return;
2153   ASM_CODE_COMMENT(this);
2154   STATIC_ASSERT(kSmiTag == 0);
2155   TestIfSmi(object, r0);
2156   Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
2157   push(object);
2158   LoadMap(object, object);
2159   CompareInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2160                            LAST_CALLABLE_JS_FUNCTION_TYPE);
2161   pop(object);
2162   Check(le, AbortReason::kOperandIsNotACallableFunction);
2163 }
2164 
2165 void MacroAssembler::AssertBoundFunction(Register object) {
2166   if (FLAG_debug_code) {
2167     STATIC_ASSERT(kSmiTag == 0);
2168     TestIfSmi(object, r0);
2169     Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
2170     push(object);
2171     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2172     pop(object);
2173     Check(eq, AbortReason::kOperandIsNotABoundFunction);
2174   }
2175 }
2176 
2177 void MacroAssembler::AssertGeneratorObject(Register object) {
2178   if (!FLAG_debug_code) return;
2179   TestIfSmi(object, r0);
2180   Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
2181 
2182   // Load map
2183   Register map = object;
2184   push(object);
2185   LoadMap(map, object);
2186 
2187   // Check if JSGeneratorObject
2188   Label do_check;
2189   Register instance_type = object;
2190   CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
2191   beq(&do_check);
2192 
2193   // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
2194   cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
2195   beq(&do_check);
2196 
2197   // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
2198   cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
2199 
2200   bind(&do_check);
2201   // Restore generator object to register and perform assertion
2202   pop(object);
2203   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
2204 }
2205 
2206 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2207                                                      Register scratch) {
2208   if (FLAG_debug_code) {
2209     Label done_checking;
2210     AssertNotSmi(object);
2211     CompareRoot(object, RootIndex::kUndefinedValue);
2212     beq(&done_checking);
2213     LoadMap(scratch, object);
2214     CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2215     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2216     bind(&done_checking);
2217   }
2218 }
2219 
2220 static const int kRegisterPassedArguments = 8;
2221 
2222 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
2223                                               int num_double_arguments) {
2224   int stack_passed_words = 0;
2225   if (num_double_arguments > DoubleRegister::kNumRegisters) {
2226     stack_passed_words +=
2227         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2228   }
2229   // Up to 8 simple arguments are passed in registers r3..r10.
2230   if (num_reg_arguments > kRegisterPassedArguments) {
2231     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2232   }
2233   return stack_passed_words;
2234 }
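// Illustrative example (not part of the original source): with 10 integer
// arguments and 2 double arguments, r3..r10 carry the first 8 integers and
// both doubles stay in FP registers, so 10 - 8 = 2 words go on the stack.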
2235 
2236 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2237                                           int num_double_arguments,
2238                                           Register scratch) {
2239   int frame_alignment = ActivationFrameAlignment();
2240   int stack_passed_arguments =
2241       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2242   int stack_space = kNumRequiredStackFrameSlots;
2243 
2244   if (frame_alignment > kSystemPointerSize) {
2245     // Make the stack end on the alignment boundary and make room for the
2246     // stack arguments -- preserving the original value of sp.
2247     mr(scratch, sp);
2248     AddS64(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize),
2249            scratch);
2250     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2251     ClearRightImm(sp, sp,
2252                   Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
2253     StoreU64(scratch,
2254              MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
2255   } else {
2256     // Make room for stack arguments
2257     stack_space += stack_passed_arguments;
2258   }
2259 
2260   // Allocate frame with required slots to make ABI work.
2261   li(r0, Operand::Zero());
2262   StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
2263 }
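// Illustrative sketch (not part of the original source): the effect of the
// ClearRightImm above - rounding the stack pointer down to a power-of-two
// alignment by clearing its low bits.
static uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  // alignment must be a power of two, e.g. 16.
  return sp & ~(alignment - 1);
}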
2264 
2265 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2266                                           Register scratch) {
2267   PrepareCallCFunction(num_reg_arguments, 0, scratch);
2268 }
2269 
2270 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
2271 
2272 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
2273 
2274 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
2275                                           DoubleRegister src2) {
2276   if (src2 == d1) {
2277     DCHECK(src1 != d2);
2278     Move(d2, src2);
2279     Move(d1, src1);
2280   } else {
2281     Move(d1, src1);
2282     Move(d2, src2);
2283   }
2284 }
2285 
2286 void TurboAssembler::CallCFunction(ExternalReference function,
2287                                    int num_reg_arguments,
2288                                    int num_double_arguments,
2289                                    bool has_function_descriptor) {
2290   Move(ip, function);
2291   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
2292                       has_function_descriptor);
2293 }
2294 
2295 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2296                                    int num_double_arguments,
2297                                    bool has_function_descriptor) {
2298   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
2299                       has_function_descriptor);
2300 }
2301 
2302 void TurboAssembler::CallCFunction(ExternalReference function,
2303                                    int num_arguments,
2304                                    bool has_function_descriptor) {
2305   CallCFunction(function, num_arguments, 0, has_function_descriptor);
2306 }
2307 
2308 void TurboAssembler::CallCFunction(Register function, int num_arguments,
2309                                    bool has_function_descriptor) {
2310   CallCFunction(function, num_arguments, 0, has_function_descriptor);
2311 }
2312 
2313 void TurboAssembler::CallCFunctionHelper(Register function,
2314                                          int num_reg_arguments,
2315                                          int num_double_arguments,
2316                                          bool has_function_descriptor) {
2317   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2318   DCHECK(has_frame());
2319 
2320   // Save the frame pointer and PC so that the stack layout remains iterable,
2321   // even without an ExitFrame which normally exists between JS and C frames.
2322   Register addr_scratch = r7;
2323   Register scratch = r8;
2324   Push(scratch);
2325   mflr(scratch);
2326   // See x64 code for reasoning about how to address the isolate data fields.
2327   if (root_array_available()) {
2328     LoadPC(r0);
2329     StoreU64(r0, MemOperand(kRootRegister,
2330                             IsolateData::fast_c_call_caller_pc_offset()));
2331     StoreU64(fp, MemOperand(kRootRegister,
2332                             IsolateData::fast_c_call_caller_fp_offset()));
2333   } else {
2334     DCHECK_NOT_NULL(isolate());
2335     Push(addr_scratch);
2336 
2337     Move(addr_scratch,
2338          ExternalReference::fast_c_call_caller_pc_address(isolate()));
2339     LoadPC(r0);
2340     StoreU64(r0, MemOperand(addr_scratch));
2341     Move(addr_scratch,
2342          ExternalReference::fast_c_call_caller_fp_address(isolate()));
2343     StoreU64(fp, MemOperand(addr_scratch));
2344     Pop(addr_scratch);
2345   }
2346   mtlr(scratch);
2347   Pop(scratch);
2348 
2349   // Just call directly. The function called cannot cause a GC, or
2350   // allow preemption, so the return address in the link register
2351   // stays correct.
2352   Register dest = function;
2353   if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
2354     // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
2355     // aware of this descriptor and pick up values from it.
2356     LoadU64(ToRegister(ABI_TOC_REGISTER),
2357             MemOperand(function, kSystemPointerSize));
2358     LoadU64(ip, MemOperand(function, 0));
2359     dest = ip;
2360   } else if (ABI_CALL_VIA_IP) {
2361     // PPC Linux and the simulator, not AIX.
2362     Move(ip, function);
2363     dest = ip;
2364   }
2365 
2366   Call(dest);
2367 
2368   // We don't unset the PC; the FP is the source of truth.
2369   Register zero_scratch = r0;
2370   mov(zero_scratch, Operand::Zero());
2371 
2372   if (root_array_available()) {
2373     StoreU64(
2374         zero_scratch,
2375         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2376   } else {
2377     DCHECK_NOT_NULL(isolate());
2378     Push(addr_scratch);
2379     Move(addr_scratch,
2380          ExternalReference::fast_c_call_caller_fp_address(isolate()));
2381     StoreU64(zero_scratch, MemOperand(addr_scratch));
2382     Pop(addr_scratch);
2383   }
2384 
2385   // Remove the frame set up in PrepareCallCFunction.
2386   int stack_passed_arguments =
2387       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2388   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2389   if (ActivationFrameAlignment() > kSystemPointerSize) {
2390     LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize), r0);
2391   } else {
2392     AddS64(sp, sp, Operand(stack_space * kSystemPointerSize), r0);
2393   }
2394 }
2395 
2396 void TurboAssembler::CheckPageFlag(
2397     Register object,
2398     Register scratch,  // scratch may be same register as object
2399     int mask, Condition cc, Label* condition_met) {
2400   DCHECK(cc == ne || cc == eq);
2401   DCHECK(scratch != r0);
2402   ClearRightImm(scratch, object, Operand(kPageSizeBits));
2403   LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset), r0);
2404 
2405   mov(r0, Operand(mask));
2406   and_(r0, scratch, r0, SetRC);
2407 
2408   if (cc == ne) {
2409     bne(condition_met, cr0);
2410   }
2411   if (cc == eq) {
2412     beq(condition_met, cr0);
2413   }
2414 }
2415 
2416 void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
2417 
2418 void TurboAssembler::ResetRoundingMode() {
2419   mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
2420 }
2421 
2422 ////////////////////////////////////////////////////////////////////////////////
2423 //
2424 // New MacroAssembler Interfaces added for PPC
2425 //
2426 ////////////////////////////////////////////////////////////////////////////////
2427 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
2428   mov(dst, Operand(value));
2429 }
2430 
2431 void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
2432   mov(dst, Operand(smi));
2433 }
2434 
2435 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
2436                                        base::Double value, Register scratch) {
2437   if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
2438       !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2439     ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
2440     if (access == ConstantPoolEntry::OVERFLOWED) {
2441       addis(scratch, kConstantPoolRegister, Operand::Zero());
2442       lfd(result, MemOperand(scratch, 0));
2443     } else {
2444       lfd(result, MemOperand(kConstantPoolRegister, 0));
2445     }
2446     return;
2447   }
2448 
2449   // Avoid a gcc strict-aliasing error by using a union cast.
2450   union {
2451     uint64_t dval;
2452 #if V8_TARGET_ARCH_PPC64
2453     intptr_t ival;
2454 #else
2455     intptr_t ival[2];
2456 #endif
2457   } litVal;
2458 
2459   litVal.dval = value.AsUint64();
2460 
2461 #if V8_TARGET_ARCH_PPC64
2462   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2463     mov(scratch, Operand(litVal.ival));
2464     mtfprd(result, scratch);
2465     return;
2466   }
2467 #endif
2468 
2469   addi(sp, sp, Operand(-kDoubleSize));
2470 #if V8_TARGET_ARCH_PPC64
2471   mov(scratch, Operand(litVal.ival));
2472   std(scratch, MemOperand(sp));
2473 #else
2474   LoadIntLiteral(scratch, litVal.ival[0]);
2475   stw(scratch, MemOperand(sp, 0));
2476   LoadIntLiteral(scratch, litVal.ival[1]);
2477   stw(scratch, MemOperand(sp, 4));
2478 #endif
2479   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2480   lfd(result, MemOperand(sp, 0));
2481   addi(sp, sp, Operand(kDoubleSize));
2482 }
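// Illustrative sketch (not part of the original source): the union cast above
// just reinterprets the double's IEEE-754 bit pattern as an integer, which can
// then be materialized into a GPR (or stored and reloaded).
static uint64_t DoubleBits(double value) {
  union {
    double dval;
    uint64_t uval;
  } cast;
  cast.dval = value;
  return cast.uval;  // e.g. DoubleBits(1.0) == 0x3FF0000000000000
}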
2483 
2484 void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2485                                     Register scratch) {
2486 // sign-extend src to 64-bit
2487 #if V8_TARGET_ARCH_PPC64
2488   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2489     mtfprwa(dst, src);
2490     return;
2491   }
2492 #endif
2493 
2494   DCHECK(src != scratch);
2495   subi(sp, sp, Operand(kDoubleSize));
2496 #if V8_TARGET_ARCH_PPC64
2497   extsw(scratch, src);
2498   std(scratch, MemOperand(sp, 0));
2499 #else
2500   srawi(scratch, src, 31);
2501   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2502   stw(src, MemOperand(sp, Register::kMantissaOffset));
2503 #endif
2504   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2505   lfd(dst, MemOperand(sp, 0));
2506   addi(sp, sp, Operand(kDoubleSize));
2507 }
2508 
2509 void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2510                                             Register scratch) {
2511 // zero-extend src to 64-bit
2512 #if V8_TARGET_ARCH_PPC64
2513   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2514     mtfprwz(dst, src);
2515     return;
2516   }
2517 #endif
2518 
2519   DCHECK(src != scratch);
2520   subi(sp, sp, Operand(kDoubleSize));
2521 #if V8_TARGET_ARCH_PPC64
2522   clrldi(scratch, src, Operand(32));
2523   std(scratch, MemOperand(sp, 0));
2524 #else
2525   li(scratch, Operand::Zero());
2526   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2527   stw(src, MemOperand(sp, Register::kMantissaOffset));
2528 #endif
2529   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2530   lfd(dst, MemOperand(sp, 0));
2531   addi(sp, sp, Operand(kDoubleSize));
2532 }
2533 
2534 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
2535 #if !V8_TARGET_ARCH_PPC64
2536                                       Register src_hi,
2537 #endif
2538                                       Register src) {
2539 #if V8_TARGET_ARCH_PPC64
2540   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2541     mtfprd(dst, src);
2542     return;
2543   }
2544 #endif
2545 
2546   subi(sp, sp, Operand(kDoubleSize));
2547 #if V8_TARGET_ARCH_PPC64
2548   std(src, MemOperand(sp, 0));
2549 #else
2550   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2551   stw(src, MemOperand(sp, Register::kMantissaOffset));
2552 #endif
2553   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2554   lfd(dst, MemOperand(sp, 0));
2555   addi(sp, sp, Operand(kDoubleSize));
2556 }
2557 
2558 #if V8_TARGET_ARCH_PPC64
2559 void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2560                                                 Register src_hi,
2561                                                 Register src_lo,
2562                                                 Register scratch) {
2563   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2564     ShiftLeftU64(scratch, src_hi, Operand(32));
2565     rldimi(scratch, src_lo, 0, 32);
2566     mtfprd(dst, scratch);
2567     return;
2568   }
2569 
2570   subi(sp, sp, Operand(kDoubleSize));
2571   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2572   stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
2573   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2574   lfd(dst, MemOperand(sp));
2575   addi(sp, sp, Operand(kDoubleSize));
2576 }
2577 #endif
2578 
2579 void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2580                                      Register scratch) {
2581 #if V8_TARGET_ARCH_PPC64
2582   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2583     mffprd(scratch, dst);
2584     rldimi(scratch, src, 0, 32);
2585     mtfprd(dst, scratch);
2586     return;
2587   }
2588 #endif
2589 
2590   subi(sp, sp, Operand(kDoubleSize));
2591   stfd(dst, MemOperand(sp));
2592   stw(src, MemOperand(sp, Register::kMantissaOffset));
2593   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2594   lfd(dst, MemOperand(sp));
2595   addi(sp, sp, Operand(kDoubleSize));
2596 }
2597 
2598 void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2599                                       Register scratch) {
2600 #if V8_TARGET_ARCH_PPC64
2601   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2602     mffprd(scratch, dst);
2603     rldimi(scratch, src, 32, 0);
2604     mtfprd(dst, scratch);
2605     return;
2606   }
2607 #endif
2608 
2609   subi(sp, sp, Operand(kDoubleSize));
2610   stfd(dst, MemOperand(sp));
2611   stw(src, MemOperand(sp, Register::kExponentOffset));
2612   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2613   lfd(dst, MemOperand(sp));
2614   addi(sp, sp, Operand(kDoubleSize));
2615 }
2616 
2617 void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2618 #if V8_TARGET_ARCH_PPC64
2619   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2620     mffprwz(dst, src);
2621     return;
2622   }
2623 #endif
2624 
2625   subi(sp, sp, Operand(kDoubleSize));
2626   stfd(src, MemOperand(sp));
2627   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2628   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2629   addi(sp, sp, Operand(kDoubleSize));
2630 }
2631 
2632 void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2633 #if V8_TARGET_ARCH_PPC64
2634   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2635     mffprd(dst, src);
2636     srdi(dst, dst, Operand(32));
2637     return;
2638   }
2639 #endif
2640 
2641   subi(sp, sp, Operand(kDoubleSize));
2642   stfd(src, MemOperand(sp));
2643   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2644   lwz(dst, MemOperand(sp, Register::kExponentOffset));
2645   addi(sp, sp, Operand(kDoubleSize));
2646 }
2647 
2648 void TurboAssembler::MovDoubleToInt64(
2649 #if !V8_TARGET_ARCH_PPC64
2650     Register dst_hi,
2651 #endif
2652     Register dst, DoubleRegister src) {
2653 #if V8_TARGET_ARCH_PPC64
2654   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2655     mffprd(dst, src);
2656     return;
2657   }
2658 #endif
2659 
2660   subi(sp, sp, Operand(kDoubleSize));
2661   stfd(src, MemOperand(sp));
2662   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2663 #if V8_TARGET_ARCH_PPC64
2664   ld(dst, MemOperand(sp, 0));
2665 #else
2666   lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
2667   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2668 #endif
2669   addi(sp, sp, Operand(kDoubleSize));
2670 }
2671 
2672 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
2673                                    Register scratch) {
2674   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2675     ShiftLeftU64(scratch, src, Operand(32));
2676     mtfprd(dst, scratch);
2677     xscvspdpn(dst, dst);
2678     return;
2679   }
2680   subi(sp, sp, Operand(kFloatSize));
2681   stw(src, MemOperand(sp, 0));
2682   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2683   lfs(dst, MemOperand(sp, 0));
2684   addi(sp, sp, Operand(kFloatSize));
2685 }
2686 
2687 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
2688                                    DoubleRegister scratch) {
2689   if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
2690     xscvdpspn(scratch, src);
2691     mffprwz(dst, scratch);
2692     return;
2693   }
2694   subi(sp, sp, Operand(kFloatSize));
2695   stfs(src, MemOperand(sp, 0));
2696   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2697   lwz(dst, MemOperand(sp, 0));
2698   addi(sp, sp, Operand(kFloatSize));
2699 }
2700 
2701 void TurboAssembler::AddS64(Register dst, Register src, Register value, OEBit s,
2702                             RCBit r) {
2703   add(dst, src, value, s, r);
2704 }
2705 
2706 void TurboAssembler::AddS64(Register dst, Register src, const Operand& value,
2707                             Register scratch, OEBit s, RCBit r) {
2708   if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
2709     addi(dst, src, value);
2710   } else {
2711     mov(scratch, value);
2712     add(dst, src, scratch, s, r);
2713   }
2714 }
2715 
2716 void TurboAssembler::SubS64(Register dst, Register src, Register value, OEBit s,
2717                             RCBit r) {
2718   sub(dst, src, value, s, r);
2719 }
2720 
2721 void TurboAssembler::SubS64(Register dst, Register src, const Operand& value,
2722                             Register scratch, OEBit s, RCBit r) {
2723   if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
2724     subi(dst, src, value);
2725   } else {
2726     mov(scratch, value);
2727     sub(dst, src, scratch, s, r);
2728   }
2729 }
2730 
2731 void TurboAssembler::AddS32(Register dst, Register src, Register value,
2732                             RCBit r) {
2733   AddS64(dst, src, value, LeaveOE, r);
2734   extsw(dst, dst, r);
2735 }
2736 
2737 void TurboAssembler::AddS32(Register dst, Register src, const Operand& value,
2738                             Register scratch, RCBit r) {
2739   AddS64(dst, src, value, scratch, LeaveOE, r);
2740   extsw(dst, dst, r);
2741 }
2742 
2743 void TurboAssembler::SubS32(Register dst, Register src, Register value,
2744                             RCBit r) {
2745   SubS64(dst, src, value, LeaveOE, r);
2746   extsw(dst, dst, r);
2747 }
2748 
2749 void TurboAssembler::SubS32(Register dst, Register src, const Operand& value,
2750                             Register scratch, RCBit r) {
2751   SubS64(dst, src, value, scratch, LeaveOE, r);
2752   extsw(dst, dst, r);
2753 }
2754 
2755 void TurboAssembler::MulS64(Register dst, Register src, const Operand& value,
2756                             Register scratch, OEBit s, RCBit r) {
2757   if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
2758     mulli(dst, src, value);
2759   } else {
2760     mov(scratch, value);
2761     mulld(dst, src, scratch, s, r);
2762   }
2763 }
2764 
2765 void TurboAssembler::MulS64(Register dst, Register src, Register value, OEBit s,
2766                             RCBit r) {
2767   mulld(dst, src, value, s, r);
2768 }
2769 
2770 void TurboAssembler::MulS32(Register dst, Register src, const Operand& value,
2771                             Register scratch, OEBit s, RCBit r) {
2772   MulS64(dst, src, value, scratch, s, r);
2773   extsw(dst, dst, r);
2774 }
2775 
2776 void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
2777                             RCBit r) {
2778   MulS64(dst, src, value, s, r);
2779   extsw(dst, dst, r);
2780 }
2781 
2782 void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
2783                             RCBit r) {
2784   divd(dst, src, value, s, r);
2785 }
2786 
2787 void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
2788                             RCBit r) {
2789   divdu(dst, src, value, s, r);
2790 }
2791 
2792 void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
2793                             RCBit r) {
2794   divw(dst, src, value, s, r);
2795   extsw(dst, dst);
2796 }
2797 void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
2798                             RCBit r) {
2799   divwu(dst, src, value, s, r);
2800   ZeroExtWord32(dst, dst);
2801 }
2802 
2803 void TurboAssembler::ModS64(Register dst, Register src, Register value) {
2804   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
2805     modsd(dst, src, value);
2806   } else {
2807     Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
2808     Push(scratch);
2809     divd(scratch, src, value);
2810     mulld(scratch, scratch, value);
2811     sub(dst, src, scratch);
2812     Pop(scratch);
2813   }
2814 }
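// Illustrative sketch (not part of the original source): the fallback used
// above when modsd is unavailable - the remainder recovered from truncating
// division as src - (src / value) * value, e.g. -7 - (-7 / 3) * 3 == -1.
static int64_t Remainder(int64_t src, int64_t value) {
  return src - (src / value) * value;
}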
2815 
2816 void TurboAssembler::ModU64(Register dst, Register src, Register value) {
2817   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
2818     modud(dst, src, value);
2819   } else {
2820     Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
2821     Push(scratch);
2822     divdu(scratch, src, value);
2823     mulld(scratch, scratch, value);
2824     sub(dst, src, scratch);
2825     Pop(scratch);
2826   }
2827 }
2828 
2829 void TurboAssembler::ModS32(Register dst, Register src, Register value) {
2830   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
2831     modsw(dst, src, value);
2832   } else {
2833     Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
2834     Push(scratch);
2835     divw(scratch, src, value);
2836     mullw(scratch, scratch, value);
2837     sub(dst, src, scratch);
2838     Pop(scratch);
2839   }
2840   extsw(dst, dst);
2841 }
2842 void TurboAssembler::ModU32(Register dst, Register src, Register value) {
2843   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
2844     moduw(dst, src, value);
2845   } else {
2846     Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
2847     Push(scratch);
2848     divwu(scratch, src, value);
2849     mullw(scratch, scratch, value);
2850     sub(dst, src, scratch);
2851     Pop(scratch);
2852   }
2853   ZeroExtWord32(dst, dst);
2854 }
2855 
2856 void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
2857                             Register scratch, RCBit r) {
2858   if (is_uint16(value.immediate()) && r == SetRC) {
2859     andi(dst, src, value);
2860   } else {
2861     mov(scratch, value);
2862     and_(dst, src, scratch, r);
2863   }
2864 }
2865 
2866 void TurboAssembler::AndU64(Register dst, Register src, Register value,
2867                             RCBit r) {
2868   and_(dst, src, value, r);
2869 }
2870 
2871 void TurboAssembler::OrU64(Register dst, Register src, const Operand& value,
2872                            Register scratch, RCBit r) {
2873   if (is_int16(value.immediate()) && r == LeaveRC) {
2874     ori(dst, src, value);
2875   } else {
2876     mov(scratch, value);
2877     orx(dst, src, scratch, r);
2878   }
2879 }
2880 
2881 void TurboAssembler::OrU64(Register dst, Register src, Register value,
2882                            RCBit r) {
2883   orx(dst, src, value, r);
2884 }
2885 
2886 void TurboAssembler::XorU64(Register dst, Register src, const Operand& value,
2887                             Register scratch, RCBit r) {
2888   if (is_int16(value.immediate()) && r == LeaveRC) {
2889     xori(dst, src, value);
2890   } else {
2891     mov(scratch, value);
2892     xor_(dst, src, scratch, r);
2893   }
2894 }
2895 
2896 void TurboAssembler::XorU64(Register dst, Register src, Register value,
2897                             RCBit r) {
2898   xor_(dst, src, value, r);
2899 }
2900 
2901 void TurboAssembler::AndU32(Register dst, Register src, const Operand& value,
2902                             Register scratch, RCBit r) {
2903   AndU64(dst, src, value, scratch, r);
2904   extsw(dst, dst, r);
2905 }
2906 
2907 void TurboAssembler::AndU32(Register dst, Register src, Register value,
2908                             RCBit r) {
2909   AndU64(dst, src, value, r);
2910   extsw(dst, dst, r);
2911 }
2912 
2913 void TurboAssembler::OrU32(Register dst, Register src, const Operand& value,
2914                            Register scratch, RCBit r) {
2915   OrU64(dst, src, value, scratch, r);
2916   extsw(dst, dst, r);
2917 }
2918 
2919 void TurboAssembler::OrU32(Register dst, Register src, Register value,
2920                            RCBit r) {
2921   OrU64(dst, src, value, r);
2922   extsw(dst, dst, r);
2923 }
2924 
2925 void TurboAssembler::XorU32(Register dst, Register src, const Operand& value,
2926                             Register scratch, RCBit r) {
2927   XorU64(dst, src, value, scratch, r);
2928   extsw(dst, dst, r);
2929 }
2930 
2931 void TurboAssembler::XorU32(Register dst, Register src, Register value,
2932                             RCBit r) {
2933   XorU64(dst, src, value, r);
2934   extsw(dst, dst, r);
2935 }
2936 
2937 void TurboAssembler::ShiftLeftU64(Register dst, Register src,
2938                                   const Operand& value, RCBit r) {
2939   sldi(dst, src, value, r);
2940 }
2941 
2942 void TurboAssembler::ShiftRightU64(Register dst, Register src,
2943                                    const Operand& value, RCBit r) {
2944   srdi(dst, src, value, r);
2945 }
2946 
2947 void TurboAssembler::ShiftRightS64(Register dst, Register src,
2948                                    const Operand& value, RCBit r) {
2949   sradi(dst, src, value.immediate(), r);
2950 }
2951 
2952 void TurboAssembler::ShiftLeftU32(Register dst, Register src,
2953                                   const Operand& value, RCBit r) {
2954   slwi(dst, src, value, r);
2955 }
2956 
2957 void TurboAssembler::ShiftRightU32(Register dst, Register src,
2958                                    const Operand& value, RCBit r) {
2959   srwi(dst, src, value, r);
2960 }
2961 
2962 void TurboAssembler::ShiftRightS32(Register dst, Register src,
2963                                    const Operand& value, RCBit r) {
2964   srawi(dst, src, value.immediate(), r);
2965 }
2966 
2967 void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register value,
2968                                   RCBit r) {
2969   sld(dst, src, value, r);
2970 }
2971 
2972 void TurboAssembler::ShiftRightU64(Register dst, Register src, Register value,
2973                                    RCBit r) {
2974   srd(dst, src, value, r);
2975 }
2976 
2977 void TurboAssembler::ShiftRightS64(Register dst, Register src, Register value,
2978                                    RCBit r) {
2979   srad(dst, src, value, r);
2980 }
2981 
2982 void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register value,
2983                                   RCBit r) {
2984   slw(dst, src, value, r);
2985 }
2986 
2987 void TurboAssembler::ShiftRightU32(Register dst, Register src, Register value,
2988                                    RCBit r) {
2989   srw(dst, src, value, r);
2990 }
2991 
2992 void TurboAssembler::ShiftRightS32(Register dst, Register src, Register value,
2993                                    RCBit r) {
2994   sraw(dst, src, value, r);
2995 }
2996 
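// Compare helpers: immediates that fit the 16-bit field of cmpi/cmpli are
// encoded directly; anything larger is moved into |scratch| first.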
2997 void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
2998   cmp(src1, src2, cr);
2999 }
3000 
3001 void TurboAssembler::CmpS64(Register src1, const Operand& src2,
3002                             Register scratch, CRegister cr) {
3003   intptr_t value = src2.immediate();
3004   if (is_int16(value)) {
3005     cmpi(src1, src2, cr);
3006   } else {
3007     mov(scratch, src2);
3008     CmpS64(src1, scratch, cr);
3009   }
3010 }
3011 
3012 void TurboAssembler::CmpU64(Register src1, const Operand& src2,
3013                             Register scratch, CRegister cr) {
3014   intptr_t value = src2.immediate();
3015   if (is_uint16(value)) {
3016     cmpli(src1, src2, cr);
3017   } else {
3018     mov(scratch, src2);
3019     CmpU64(src1, scratch, cr);
3020   }
3021 }
3022 
3023 void TurboAssembler::CmpU64(Register src1, Register src2, CRegister cr) {
3024   cmpl(src1, src2, cr);
3025 }
3026 
3027 void TurboAssembler::CmpS32(Register src1, const Operand& src2,
3028                             Register scratch, CRegister cr) {
3029   intptr_t value = src2.immediate();
3030   if (is_int16(value)) {
3031     cmpwi(src1, src2, cr);
3032   } else {
3033     mov(scratch, src2);
3034     CmpS32(src1, scratch, cr);
3035   }
3036 }
3037 
3038 void TurboAssembler::CmpS32(Register src1, Register src2, CRegister cr) {
3039   cmpw(src1, src2, cr);
3040 }
3041 
3042 void TurboAssembler::CmpU32(Register src1, const Operand& src2,
3043                             Register scratch, CRegister cr) {
3044   intptr_t value = src2.immediate();
3045   if (is_uint16(value)) {
3046     cmplwi(src1, src2, cr);
3047   } else {
3048     mov(scratch, src2);
3049     CmpU32(src1, scratch, cr);
3050   }
3051 }
3052 
3053 void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
3054   cmplw(src1, src2, cr);
3055 }
3056 
3057 void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
3058                             DoubleRegister rhs, RCBit r) {
3059   fadd(dst, lhs, rhs, r);
3060 }
3061 
3062 void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
3063                             DoubleRegister rhs, RCBit r) {
3064   fsub(dst, lhs, rhs, r);
3065 }
3066 
3067 void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
3068                             DoubleRegister rhs, RCBit r) {
3069   fmul(dst, lhs, rhs, r);
3070 }
3071 
3072 void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
3073                             DoubleRegister rhs, RCBit r) {
3074   fdiv(dst, lhs, rhs, r);
3075 }
3076 
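// Single-precision helpers: the operation is performed in double precision
// and the result is rounded to single precision with frsp.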
3077 void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
3078                             DoubleRegister rhs, RCBit r) {
3079   fadd(dst, lhs, rhs, r);
3080   frsp(dst, dst, r);
3081 }
3082 
3083 void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
3084                             DoubleRegister rhs, RCBit r) {
3085   fsub(dst, lhs, rhs, r);
3086   frsp(dst, dst, r);
3087 }
3088 
3089 void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
3090                             DoubleRegister rhs, RCBit r) {
3091   fmul(dst, lhs, rhs, r);
3092   frsp(dst, dst, r);
3093 }
3094 
3095 void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
3096                             DoubleRegister rhs, RCBit r) {
3097   fdiv(dst, lhs, rhs, r);
3098   frsp(dst, dst, r);
3099 }
3100 
3101 void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
3102                                  DoubleRegister rhs, RCBit r) {
3103   fcpsgn(dst, rhs, lhs, r);
3104 }
3105 
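// Smi literal helpers: with pointer compression (31-bit Smis) the literal fits
// in 32 bits and can be used as an immediate; otherwise it is loaded into
// |scratch| as a full-width value first.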
3106 void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
3107                                    CRegister cr) {
3108 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3109   CmpS32(src1, Operand(smi), scratch, cr);
3110 #else
3111   LoadSmiLiteral(scratch, smi);
3112   CmpS64(src1, scratch, cr);
3113 #endif
3114 }
3115 
3116 void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
3117                                     CRegister cr) {
3118 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3119   CmpU64(src1, Operand(smi), scratch, cr);
3120 #else
3121   LoadSmiLiteral(scratch, smi);
3122   CmpU64(src1, scratch, cr);
3123 #endif
3124 }
3125 
3126 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
3127                                    Register scratch) {
3128 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3129   AddS64(dst, src, Operand(smi.ptr()), scratch);
3130 #else
3131   LoadSmiLiteral(scratch, smi);
3132   add(dst, src, scratch);
3133 #endif
3134 }
3135 
3136 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
3137                                    Register scratch) {
3138 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3139   AddS64(dst, src, Operand(-(static_cast<intptr_t>(smi.ptr()))), scratch);
3140 #else
3141   LoadSmiLiteral(scratch, smi);
3142   sub(dst, src, scratch);
3143 #endif
3144 }
3145 
3146 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
3147                                    Register scratch, RCBit rc) {
3148 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3149   AndU64(dst, src, Operand(smi), scratch, rc);
3150 #else
3151   LoadSmiLiteral(scratch, smi);
3152   and_(dst, src, scratch, rc);
3153 #endif
3154 }
3155 
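// GenerateMemoryOperation selects between the D-form (register + 16-bit
// displacement) and X-form (register + register) encoding of a memory access.
// Offsets that do not fit in 16 bits are materialized into |scratch|.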
3156 #define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
3157   {                                                     \
3158     int64_t offset = mem.offset();                      \
3159                                                         \
3160     if (mem.rb() == no_reg) {                           \
3161       if (!is_int16(offset)) {                          \
3162         /* cannot use d-form */                         \
3163         CHECK_NE(scratch, no_reg);                      \
3164         mov(scratch, Operand(offset));                  \
3165         rr_op(reg, MemOperand(mem.ra(), scratch));      \
3166       } else {                                          \
3167         ri_op(reg, mem);                                \
3168       }                                                 \
3169     } else {                                            \
3170       if (offset == 0) {                                \
3171         rr_op(reg, mem);                                \
3172       } else if (is_int16(offset)) {                    \
3173         CHECK_NE(scratch, no_reg);                      \
3174         addi(scratch, mem.rb(), Operand(offset));       \
3175         rr_op(reg, MemOperand(mem.ra(), scratch));      \
3176       } else {                                          \
3177         CHECK_NE(scratch, no_reg);                      \
3178         mov(scratch, Operand(offset));                  \
3179         add(scratch, scratch, mem.rb());                \
3180         rr_op(reg, MemOperand(mem.ra(), scratch));      \
3181       }                                                 \
3182     }                                                   \
3183   }
3184 
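// Same as above, but for DS-form instructions (ld/lwa/std/stdu) whose 16-bit
// displacement must also be a multiple of 4; misaligned offsets fall back to
// the X-form encoding.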
3185 #define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
3186   {                                                              \
3187     int64_t offset = mem.offset();                               \
3188     int misaligned = (offset & 3);                               \
3189                                                                  \
3190     if (mem.rb() == no_reg) {                                    \
3191       if (!is_int16(offset) || misaligned) {                     \
3192         /* cannot use d-form */                                  \
3193         CHECK_NE(scratch, no_reg);                               \
3194         mov(scratch, Operand(offset));                           \
3195         rr_op(reg, MemOperand(mem.ra(), scratch));               \
3196       } else {                                                   \
3197         ri_op(reg, mem);                                         \
3198       }                                                          \
3199     } else {                                                     \
3200       if (offset == 0) {                                         \
3201         rr_op(reg, mem);                                         \
3202       } else if (is_int16(offset)) {                             \
3203         CHECK_NE(scratch, no_reg);                               \
3204         addi(scratch, mem.rb(), Operand(offset));                \
3205         rr_op(reg, MemOperand(mem.ra(), scratch));               \
3206       } else {                                                   \
3207         CHECK_NE(scratch, no_reg);                               \
3208         mov(scratch, Operand(offset));                           \
3209         add(scratch, scratch, mem.rb());                         \
3210         rr_op(reg, MemOperand(mem.ra(), scratch));               \
3211       }                                                          \
3212     }                                                            \
3213   }
3214 
3215 #define MEM_OP_WITH_ALIGN_LIST(V) \
3216   V(LoadU64, ld, ldx)             \
3217   V(LoadS32, lwa, lwax)           \
3218   V(StoreU64, std, stdx)          \
3219   V(StoreU64WithUpdate, stdu, stdux)
3220 
3221 #define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op)           \
3222   void TurboAssembler::name(Register reg, const MemOperand& mem, \
3223                             Register scratch) {                  \
3224     GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op);    \
3225   }
3226 MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION)
3227 #undef MEM_OP_WITH_ALIGN_LIST
3228 #undef MEM_OP_WITH_ALIGN_FUNCTION
3229 
3230 #define MEM_OP_LIST(V)                                 \
3231   V(LoadU32, Register, lwz, lwzx)                      \
3232   V(LoadS16, Register, lha, lhax)                      \
3233   V(LoadU16, Register, lhz, lhzx)                      \
3234   V(LoadU8, Register, lbz, lbzx)                       \
3235   V(StoreU32, Register, stw, stwx)                     \
3236   V(StoreU16, Register, sth, sthx)                     \
3237   V(StoreU8, Register, stb, stbx)                      \
3238   V(LoadF64, DoubleRegister, lfd, lfdx)                \
3239   V(LoadF32, DoubleRegister, lfs, lfsx)                \
3240   V(StoreF64, DoubleRegister, stfd, stfdx)             \
3241   V(StoreF32, DoubleRegister, stfs, stfsx)             \
3242   V(LoadU64WithUpdate, Register, ldu, ldux)            \
3243   V(LoadF64WithUpdate, DoubleRegister, lfdu, lfdux)    \
3244   V(LoadF32WithUpdate, DoubleRegister, lfsu, lfsux)    \
3245   V(StoreF64WithUpdate, DoubleRegister, stfdu, stfdux) \
3246   V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux)
3247 
3248 #define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op)            \
3249   void TurboAssembler::name(result_t reg, const MemOperand& mem, \
3250                             Register scratch) {                  \
3251     GenerateMemoryOperation(reg, mem, ri_op, rr_op);             \
3252   }
3253 MEM_OP_LIST(MEM_OP_FUNCTION)
3254 #undef MEM_OP_LIST
3255 #undef MEM_OP_FUNCTION
3256 
3257 void TurboAssembler::LoadS8(Register dst, const MemOperand& mem,
3258                             Register scratch) {
3259   LoadU8(dst, mem, scratch);
3260   extsb(dst, dst);
3261 }
3262 
3263 void TurboAssembler::LoadSimd128(Simd128Register src, const MemOperand& mem) {
3264   DCHECK(mem.rb().is_valid());
3265   lxvx(src, mem);
3266 }
3267 
3268 void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
3269   DCHECK(mem.rb().is_valid());
3270   stxvx(src, mem);
3271 }
3272 
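// The *LE accessors read and write little-endian data. On big-endian targets
// they use the byte-reversing X-form instructions (l*brx/st*brx), which take
// no displacement, so any offset is first folded into |scratch|. On
// little-endian targets they simply alias the regular accessors.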
3273 #define GenerateMemoryLEOperation(reg, mem, op)                \
3274   {                                                            \
3275     if (mem.offset() == 0) {                                   \
3276       if (mem.rb() != no_reg)                                  \
3277         op(reg, mem);                                          \
3278       else                                                     \
3279         op(reg, MemOperand(r0, mem.ra()));                     \
3280     } else if (is_int16(mem.offset())) {                       \
3281       if (mem.rb() != no_reg)                                  \
3282         addi(scratch, mem.rb(), Operand(mem.offset()));        \
3283       else                                                     \
3284         mov(scratch, Operand(mem.offset()));                   \
3285       op(reg, MemOperand(mem.ra(), scratch));                  \
3286     } else {                                                   \
3287       mov(scratch, Operand(mem.offset()));                     \
3288       if (mem.rb() != no_reg) add(scratch, scratch, mem.rb()); \
3289       op(reg, MemOperand(mem.ra(), scratch));                  \
3290     }                                                          \
3291   }
3292 
3293 #define MEM_LE_OP_LIST(V) \
3294   V(LoadU64, ldbrx)       \
3295   V(LoadU32, lwbrx)       \
3296   V(LoadU16, lhbrx)       \
3297   V(StoreU64, stdbrx)     \
3298   V(StoreU32, stwbrx)     \
3299   V(StoreU16, sthbrx)
3300 
3301 #ifdef V8_TARGET_BIG_ENDIAN
3302 #define MEM_LE_OP_FUNCTION(name, op)                                 \
3303   void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
3304                                 Register scratch) {                  \
3305     GenerateMemoryLEOperation(reg, mem, op);                         \
3306   }
3307 #else
3308 #define MEM_LE_OP_FUNCTION(name, op)                                 \
3309   void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
3310                                 Register scratch) {                  \
3311     name(reg, mem, scratch);                                         \
3312   }
3313 #endif
3314 
3315 MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION)
3316 #undef MEM_LE_OP_FUNCTION
3317 #undef MEM_LE_OP_LIST
3318 
3319 void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem,
3320                                Register scratch) {
3321 #ifdef V8_TARGET_BIG_ENDIAN
3322   LoadU32LE(dst, mem, scratch);
3323   extsw(dst, dst);
3324 #else
3325   LoadS32(dst, mem, scratch);
3326 #endif
3327 }
3328 
3329 void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem,
3330                                Register scratch) {
3331 #ifdef V8_TARGET_BIG_ENDIAN
3332   LoadU16LE(dst, mem, scratch);
3333   extsh(dst, dst);
3334 #else
3335   LoadS16(dst, mem, scratch);
3336 #endif
3337 }
3338 
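// On big-endian targets the floating-point LE loads go through the stack: the
// value is first loaded as an integer with LoadU64LE/LoadU32LE, pushed, and
// then reloaded into the FP register from the stack slot.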
3339 void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
3340                                Register scratch, Register scratch2) {
3341 #ifdef V8_TARGET_BIG_ENDIAN
3342   LoadU64LE(scratch, mem, scratch2);
3343   push(scratch);
3344   LoadF64(dst, MemOperand(sp), scratch2);
3345   pop(scratch);
3346 #else
3347   LoadF64(dst, mem, scratch);
3348 #endif
3349 }
3350 
3351 void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
3352                                Register scratch, Register scratch2) {
3353 #ifdef V8_TARGET_BIG_ENDIAN
3354   LoadU32LE(scratch, mem, scratch2);
3355   push(scratch);
3356   LoadF32(dst, MemOperand(sp, 4), scratch2);
3357   pop(scratch);
3358 #else
3359   LoadF32(dst, mem, scratch);
3360 #endif
3361 }
3362 
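// The floating-point LE stores on big-endian targets first store the register
// natively, reload the bits as an integer, and store them again byte-reversed.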
3363 void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
3364                                 Register scratch, Register scratch2) {
3365 #ifdef V8_TARGET_BIG_ENDIAN
3366   StoreF64(dst, mem, scratch2);
3367   LoadU64(scratch, mem, scratch2);
3368   StoreU64LE(scratch, mem, scratch2);
3369 #else
3370   StoreF64(dst, mem, scratch);
3371 #endif
3372 }
3373 
3374 void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
3375                                 Register scratch, Register scratch2) {
3376 #ifdef V8_TARGET_BIG_ENDIAN
3377   StoreF32(dst, mem, scratch2);
3378   LoadU32(scratch, mem, scratch2);
3379   StoreU32LE(scratch, mem, scratch2);
3380 #else
3381   StoreF32(dst, mem, scratch);
3382 #endif
3383 }
3384 
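// Returns an allocatable general register that is not one of the (up to six)
// given registers, for use as an ad-hoc scratch register.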
3385 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
3386                                    Register reg4, Register reg5,
3387                                    Register reg6) {
3388   RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
3389 
3390   const RegisterConfiguration* config = RegisterConfiguration::Default();
3391   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3392     int code = config->GetAllocatableGeneralCode(i);
3393     Register candidate = Register::from_code(code);
3394     if (regs.has(candidate)) continue;
3395     return candidate;
3396   }
3397   UNREACHABLE();
3398 }
3399 
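// Swap helpers. Register/register swaps go through a scratch register;
// memory operands are loaded and stored, spilling to the stack where an extra
// scratch is needed.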
3400 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
3401   if (src == dst) return;
3402   DCHECK(!AreAliased(src, dst, scratch));
3403   mr(scratch, src);
3404   mr(src, dst);
3405   mr(dst, scratch);
3406 }
3407 
3408 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
3409   if (dst.ra() != r0 && dst.ra().is_valid())
3410     DCHECK(!AreAliased(src, dst.ra(), scratch));
3411   if (dst.rb() != r0 && dst.rb().is_valid())
3412     DCHECK(!AreAliased(src, dst.rb(), scratch));
3413   DCHECK(!AreAliased(src, scratch));
3414   mr(scratch, src);
3415   LoadU64(src, dst, r0);
3416   StoreU64(scratch, dst, r0);
3417 }
3418 
3419 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
3420                            Register scratch_1) {
3421   if (src.ra() != r0 && src.ra().is_valid())
3422     DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
3423   if (src.rb() != r0 && src.rb().is_valid())
3424     DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
3425   if (dst.ra() != r0 && dst.ra().is_valid())
3426     DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
3427   if (dst.rb() != r0 && dst.rb().is_valid())
3428     DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
3429   DCHECK(!AreAliased(scratch_0, scratch_1));
3430   if (is_int16(src.offset()) || is_int16(dst.offset())) {
3431     if (!is_int16(src.offset())) {
3432       // swap operand
3433       MemOperand temp = src;
3434       src = dst;
3435       dst = temp;
3436     }
3437     LoadU64(scratch_1, dst, scratch_0);
3438     LoadU64(scratch_0, src);
3439     StoreU64(scratch_1, src);
3440     StoreU64(scratch_0, dst, scratch_1);
3441   } else {
3442     LoadU64(scratch_1, dst, scratch_0);
3443     push(scratch_1);
3444     LoadU64(scratch_0, src, scratch_1);
3445     StoreU64(scratch_0, dst, scratch_1);
3446     pop(scratch_1);
3447     StoreU64(scratch_1, src, scratch_0);
3448   }
3449 }
3450 
3451 void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
3452                                  DoubleRegister scratch) {
3453   if (src == dst) return;
3454   DCHECK(!AreAliased(src, dst, scratch));
3455   fmr(scratch, src);
3456   fmr(src, dst);
3457   fmr(dst, scratch);
3458 }
3459 
3460 void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
3461                                  DoubleRegister scratch) {
3462   DCHECK(!AreAliased(src, scratch));
3463   fmr(scratch, src);
3464   LoadF32(src, dst, r0);
3465   StoreF32(scratch, dst, r0);
3466 }
3467 
3468 void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
3469                                  DoubleRegister scratch_0,
3470                                  DoubleRegister scratch_1) {
3471   DCHECK(!AreAliased(scratch_0, scratch_1));
3472   LoadF32(scratch_0, src, r0);
3473   LoadF32(scratch_1, dst, r0);
3474   StoreF32(scratch_0, dst, r0);
3475   StoreF32(scratch_1, src, r0);
3476 }
3477 
3478 void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
3479                                 DoubleRegister scratch) {
3480   if (src == dst) return;
3481   DCHECK(!AreAliased(src, dst, scratch));
3482   fmr(scratch, src);
3483   fmr(src, dst);
3484   fmr(dst, scratch);
3485 }
3486 
3487 void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
3488                                 DoubleRegister scratch) {
3489   DCHECK(!AreAliased(src, scratch));
3490   fmr(scratch, src);
3491   LoadF64(src, dst, r0);
3492   StoreF64(scratch, dst, r0);
3493 }
3494 
3495 void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
3496                                 DoubleRegister scratch_0,
3497                                 DoubleRegister scratch_1) {
3498   DCHECK(!AreAliased(scratch_0, scratch_1));
3499   LoadF64(scratch_0, src, r0);
3500   LoadF64(scratch_1, dst, r0);
3501   StoreF64(scratch_0, dst, r0);
3502   StoreF64(scratch_1, src, r0);
3503 }
3504 
3505 void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
3506                                  Simd128Register scratch) {
3507   if (src == dst) return;
3508   vor(scratch, src, src);
3509   vor(src, dst, dst);
3510   vor(dst, scratch, scratch);
3511 }
3512 
3513 void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
3514                                  Simd128Register scratch) {
3515   DCHECK(src != scratch);
3516   // push v0, to be used as scratch
3517   addi(sp, sp, Operand(-kSimd128Size));
3518   StoreSimd128(v0, MemOperand(r0, sp));
3519   mov(ip, Operand(dst.offset()));
3520   LoadSimd128(v0, MemOperand(dst.ra(), ip));
3521   StoreSimd128(src, MemOperand(dst.ra(), ip));
3522   vor(src, v0, v0);
3523   // restore v0
3524   LoadSimd128(v0, MemOperand(r0, sp));
3525   addi(sp, sp, Operand(kSimd128Size));
3526 }
3527 
3528 void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
3529                                  Simd128Register scratch) {
3530   // push v0 and v1, to be used as scratch
3531   addi(sp, sp, Operand(2 * -kSimd128Size));
3532   StoreSimd128(v0, MemOperand(r0, sp));
3533   li(ip, Operand(kSimd128Size));
3534   StoreSimd128(v1, MemOperand(ip, sp));
3535 
3536   mov(ip, Operand(src.offset()));
3537   LoadSimd128(v0, MemOperand(src.ra(), ip));
3538   mov(ip, Operand(dst.offset()));
3539   LoadSimd128(v1, MemOperand(dst.ra(), ip));
3540 
3541   StoreSimd128(v0, MemOperand(dst.ra(), ip));
3542   mov(ip, Operand(src.offset()));
3543   StoreSimd128(v1, MemOperand(src.ra(), ip));
3544 
3545   // restore v0 and v1
3546   LoadSimd128(v0, MemOperand(r0, sp));
3547   li(ip, Operand(kSimd128Size));
3548   LoadSimd128(v1, MemOperand(ip, sp));
3549   addi(sp, sp, Operand(2 * kSimd128Size));
3550 }
3551 
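// Byte-reverse helpers: Power10 provides brh/brw/brd directly; older cores
// fall back to rotate-and-insert sequences (ByteReverseU64 uses a stack round
// trip through ldbrx).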
3552 void TurboAssembler::ByteReverseU16(Register dst, Register val,
3553                                     Register scratch) {
3554   if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
3555     brh(dst, val);
3556     ZeroExtHalfWord(dst, dst);
3557     return;
3558   }
3559   rlwinm(scratch, val, 8, 16, 23);
3560   rlwinm(dst, val, 24, 24, 31);
3561   orx(dst, scratch, dst);
3562   ZeroExtHalfWord(dst, dst);
3563 }
3564 
3565 void TurboAssembler::ByteReverseU32(Register dst, Register val,
3566                                     Register scratch) {
3567   if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
3568     brw(dst, val);
3569     ZeroExtWord32(dst, dst);
3570     return;
3571   }
3572   rotlwi(scratch, val, 8);
3573   rlwimi(scratch, val, 24, 0, 7);
3574   rlwimi(scratch, val, 24, 16, 23);
3575   ZeroExtWord32(dst, scratch);
3576 }
3577 
3578 void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) {
3579   if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
3580     brd(dst, val);
3581     return;
3582   }
3583   subi(sp, sp, Operand(kSystemPointerSize));
3584   std(val, MemOperand(sp));
3585   ldbrx(dst, MemOperand(r0, sp));
3586   addi(sp, sp, Operand(kSystemPointerSize));
3587 }
3588 
3589 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
3590   CmpS64(x, Operand(y), r0);
3591   beq(dest);
3592 }
3593 
3594 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
3595   CmpS64(x, Operand(y), r0);
3596   blt(dest);
3597 }
3598 
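// Converts the Smi builtin index in |builtin_index| into a byte offset into
// the isolate's builtin entry table and loads the corresponding entry address.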
3599 void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
3600   STATIC_ASSERT(kSystemPointerSize == 8);
3601   STATIC_ASSERT(kSmiTagSize == 1);
3602   STATIC_ASSERT(kSmiTag == 0);
3603 
3604   // The builtin_index register contains the builtin index as a Smi.
3605   if (SmiValuesAre32Bits()) {
3606     ShiftRightS64(builtin_index, builtin_index,
3607                   Operand(kSmiShift - kSystemPointerSizeLog2));
3608   } else {
3609     DCHECK(SmiValuesAre31Bits());
3610     ShiftLeftU64(builtin_index, builtin_index,
3611                  Operand(kSystemPointerSizeLog2 - kSmiShift));
3612   }
3613   AddS64(builtin_index, builtin_index,
3614          Operand(IsolateData::builtin_entry_table_offset()));
3615   LoadU64(builtin_index, MemOperand(kRootRegister, builtin_index));
3616 }
3617 
3618 void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
3619   LoadEntryFromBuiltinIndex(builtin_index);
3620   Call(builtin_index);
3621 }
3622 
3623 void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
3624                                           Register destination) {
3625   ASM_CODE_COMMENT(this);
3626   LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
3627 }
3628 
3629 MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
3630   ASM_CODE_COMMENT(this);
3631   DCHECK(root_array_available());
3632   return MemOperand(kRootRegister,
3633                     IsolateData::BuiltinEntrySlotOffset(builtin));
3634 }
3635 
3636 void TurboAssembler::LoadCodeObjectEntry(Register destination,
3637                                          Register code_object) {
3638   // Code objects are called differently depending on whether we are generating
3639   // builtin code (which will later be embedded into the binary) or compiling
3640   // user JS code at runtime.
3641   // * Builtin code runs in --jitless mode and thus must not call into on-heap
3642   //   Code targets. Instead, we dispatch through the builtins entry table.
3643   // * Codegen at runtime does not have this restriction and we can use the
3644   //   shorter, branchless instruction sequence. The assumption here is that
3645   //   targets are usually generated code and not builtin Code objects.
3646 
3647   if (options().isolate_independent_code) {
3648     DCHECK(root_array_available());
3649     Label if_code_is_off_heap, out;
3650 
3651     Register scratch = r11;
3652 
3653     DCHECK(!AreAliased(destination, scratch));
3654     DCHECK(!AreAliased(code_object, scratch));
3655 
3656     // Check whether the Code object is an off-heap trampoline. If so, call its
3657     // (off-heap) entry point directly without going through the (on-heap)
3658     // trampoline.  Otherwise, just call the Code object as always.
3659     LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset), r0);
3660     mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
3661     and_(r0, scratch, r0, SetRC);
3662     bne(&if_code_is_off_heap, cr0);
3663 
3664     // Not an off-heap trampoline, the entry point is at
3665     // Code::raw_instruction_start().
3666     addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
3667     b(&out);
3668 
3669     // An off-heap trampoline, the entry point is loaded from the builtin entry
3670     // table.
3671     bind(&if_code_is_off_heap);
3672     LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
3673             r0);
3674     ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
3675     add(destination, destination, kRootRegister);
3676     LoadU64(destination,
3677             MemOperand(destination, IsolateData::builtin_entry_table_offset()),
3678             r0);
3679 
3680     bind(&out);
3681   } else {
3682     addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
3683   }
3684 }
3685 
3686 void TurboAssembler::CallCodeObject(Register code_object) {
3687   LoadCodeObjectEntry(code_object, code_object);
3688   Call(code_object);
3689 }
3690 
3691 void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
3692   DCHECK_EQ(JumpMode::kJump, jump_mode);
3693   LoadCodeObjectEntry(code_object, code_object);
3694   Jump(code_object);
3695 }
3696 
3697 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
3698   // This generates the final instruction sequence for calls to C functions
3699   // once an exit frame has been constructed.
3700   //
3701   // Note that this assumes the caller code (i.e. the Code object currently
3702   // being generated) is immovable or that the callee function cannot trigger
3703   // GC, since the callee function will return to it.
3704 
3705   static constexpr int after_call_offset = 5 * kInstrSize;
3706   Label start_call;
3707   Register dest = target;
3708 
3709   if (ABI_USES_FUNCTION_DESCRIPTORS) {
3710     // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
3711     // aware of this descriptor and pick up values from it.
3712     LoadU64(ToRegister(ABI_TOC_REGISTER),
3713             MemOperand(target, kSystemPointerSize));
3714     LoadU64(ip, MemOperand(target, 0));
3715     dest = ip;
3716   } else if (ABI_CALL_VIA_IP && dest != ip) {
3717     Move(ip, target);
3718     dest = ip;
3719   }
3720 
3721   LoadPC(r7);
3722   bind(&start_call);
3723   addi(r7, r7, Operand(after_call_offset));
3724   StoreU64(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
3725   Call(dest);
3726 
3727   DCHECK_EQ(after_call_offset - kInstrSize,
3728             SizeOfCodeGeneratedSince(&start_call));
3729 }
3730 
3731 void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
3732                                            DeoptimizeKind kind, Label* ret,
3733                                            Label*) {
3734   BlockTrampolinePoolScope block_trampoline_pool(this);
3735   CHECK_LE(target, Builtins::kLastTier0);
3736   LoadU64(ip, MemOperand(kRootRegister,
3737                          IsolateData::BuiltinEntrySlotOffset(target)));
3738   Call(ip);
3739   DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
3740             (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
3741                                             : Deoptimizer::kEagerDeoptExitSize);
3742 }
3743 
3744 void TurboAssembler::ZeroExtByte(Register dst, Register src) {
3745   clrldi(dst, src, Operand(56));
3746 }
3747 
3748 void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
3749   clrldi(dst, src, Operand(48));
3750 }
3751 
3752 void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
3753   clrldi(dst, src, Operand(32));
3754 }
3755 
3756 void TurboAssembler::Trap() { stop(); }
3757 void TurboAssembler::DebugBreak() { stop(); }
3758 
3759 void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
3760 
3761 void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
3762 
3763 void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) {
3764   cntlzw(dst, src, r);
3765 }
3766 
3767 void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
3768   cntlzd(dst, src, r);
3769 }
3770 
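// Pre-Power9 fallback for cnttzw/cnttzd: counts trailing zeros by testing and
// shifting one bit at a time, with CTR bounding the loop at |max_count| for a
// zero input.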
3771 #define COUNT_TRAILING_ZEROES_SLOW(max_count, scratch1, scratch2) \
3772   Label loop, done;                                               \
3773   li(scratch1, Operand(max_count));                               \
3774   mtctr(scratch1);                                                \
3775   mr(scratch1, src);                                              \
3776   li(dst, Operand::Zero());                                       \
3777   bind(&loop); /* while ((src & 1) == 0) */                       \
3778   andi(scratch2, scratch1, Operand(1));                           \
3779   bne(&done, cr0);                                                \
3780   srdi(scratch1, scratch1, Operand(1)); /* src >>= 1;*/           \
3781   addi(dst, dst, Operand(1));           /* dst++ */               \
3782   bdnz(&loop);                                                    \
3783   bind(&done);
3784 void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
3785                                            Register scratch1, Register scratch2,
3786                                            RCBit r) {
3787   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3788     cnttzw(dst, src, r);
3789   } else {
3790     COUNT_TRAILING_ZEROES_SLOW(32, scratch1, scratch2);
3791   }
3792 }
3793 
3794 void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
3795                                            Register scratch1, Register scratch2,
3796                                            RCBit r) {
3797   if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3798     cnttzd(dst, src, r);
3799   } else {
3800     COUNT_TRAILING_ZEROES_SLOW(64, scratch1, scratch2);
3801   }
3802 }
3803 #undef COUNT_TRAILING_ZEROES_SLOW
3804 
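// Clears byte |byte_idx| of |dst| (byte 0 is the most significant byte, byte 7
// the least significant) by rotating the byte into the top position, clearing
// the top 8 bits, and rotating back.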
3805 void TurboAssembler::ClearByteU64(Register dst, int byte_idx) {
3806   CHECK(0 <= byte_idx && byte_idx <= 7);
3807   int shift = byte_idx * 8;
3808   rldicl(dst, dst, shift, 8);
3809   rldicl(dst, dst, 64 - shift, 0);
3810 }
3811 
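// Bit-reversal helpers: reverse the byte order first, then reverse the bits
// within each byte of the result (ReverseBitsU32 only touches the low four
// bytes).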
3812 void TurboAssembler::ReverseBitsU64(Register dst, Register src,
3813                                     Register scratch1, Register scratch2) {
3814   ByteReverseU64(dst, src);
3815   for (int i = 0; i < 8; i++) {
3816     ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
3817   }
3818 }
3819 
3820 void TurboAssembler::ReverseBitsU32(Register dst, Register src,
3821                                     Register scratch1, Register scratch2) {
3822   ByteReverseU32(dst, src, scratch1);
3823   for (int i = 4; i < 8; i++) {
3824     ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
3825   }
3826 }
3827 
3828 // byte_idx=7 refers to least significant byte
3829 void TurboAssembler::ReverseBitsInSingleByteU64(Register dst, Register src,
3830                                                 Register scratch1,
3831                                                 Register scratch2,
3832                                                 int byte_idx) {
3833   CHECK(0 <= byte_idx && byte_idx <= 7);
3834   int j = byte_idx;
3835   // zero all bits of scratch2 (the accumulator for the reversed byte)
3836   li(scratch2, Operand(0));
3837   for (int i = 0; i <= 7; i++) {
3838     // zero all bits of scratch1
3839     li(scratch1, Operand(0));
3840     // move bit (j+1)*8-i-1 of src to bit j*8+i of scratch1, erase bits
3841     // (j*8+i+1):end of scratch1
3842     int shift = 7 - (2 * i);
3843     if (shift < 0) shift += 64;
3844     rldicr(scratch1, src, shift, j * 8 + i);
3845     // erase bits start:(j*8+i-1) of scratch1 (inclusive)
3846     rldicl(scratch1, scratch1, 0, j * 8 + i);
3847     // scratch2 = scratch2|scratch1
3848     orx(scratch2, scratch2, scratch1);
3849   }
3850   // clear jth byte of dst and insert jth byte of scratch2
3851   ClearByteU64(dst, j);
3852   orx(dst, dst, scratch2);
3853 }
3854 
3855 }  // namespace internal
3856 }  // namespace v8
3857 
3858 #endif  // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
3859