1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <assert.h>  // For assert
6 #include <limits.h>  // For LONG_MIN, LONG_MAX.
7 
8 #if V8_TARGET_ARCH_PPC
9 
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/callable.h"
14 #include "src/code-factory.h"
15 #include "src/code-stubs.h"
16 #include "src/debug/debug.h"
17 #include "src/external-reference-table.h"
18 #include "src/frames-inl.h"
19 #include "src/instruction-stream.h"
20 #include "src/register-configuration.h"
21 #include "src/runtime/runtime.h"
22 #include "src/snapshot/snapshot.h"
23 #include "src/wasm/wasm-code-manager.h"
24 
25 #include "src/ppc/macro-assembler-ppc.h"
26 
27 namespace v8 {
28 namespace internal {
29 
30 MacroAssembler::MacroAssembler(Isolate* isolate,
31                                const AssemblerOptions& options, void* buffer,
32                                int size, CodeObjectRequired create_code_object)
33     : TurboAssembler(isolate, options, buffer, size, create_code_object) {
34   if (create_code_object == CodeObjectRequired::kYes) {
35     // Unlike TurboAssembler, which can be used off the main thread and may not
36     // allocate, macro assembler creates its own copy of the self-reference
37     // marker in order to disambiguate between self-references during nested
38     // code generation (e.g.: codegen of the current object triggers stub
39     // compilation through CodeStub::GetCode()).
40     code_object_ = Handle<HeapObject>::New(
41         *isolate->factory()->NewSelfReferenceMarker(), isolate);
42   }
43 }
44 
45 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
46                                                     Register exclusion1,
47                                                     Register exclusion2,
48                                                     Register exclusion3) const {
49   int bytes = 0;
50   RegList exclusions = 0;
51   if (exclusion1 != no_reg) {
52     exclusions |= exclusion1.bit();
53     if (exclusion2 != no_reg) {
54       exclusions |= exclusion2.bit();
55       if (exclusion3 != no_reg) {
56         exclusions |= exclusion3.bit();
57       }
58     }
59   }
60 
61   RegList list = kJSCallerSaved & ~exclusions;
62   bytes += NumRegs(list) * kPointerSize;
63 
64   if (fp_mode == kSaveFPRegs) {
65     bytes += kNumCallerSavedDoubles * kDoubleSize;
66   }
67 
68   return bytes;
69 }
70 
71 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
72                                     Register exclusion2, Register exclusion3) {
73   int bytes = 0;
74   RegList exclusions = 0;
75   if (exclusion1 != no_reg) {
76     exclusions |= exclusion1.bit();
77     if (exclusion2 != no_reg) {
78       exclusions |= exclusion2.bit();
79       if (exclusion3 != no_reg) {
80         exclusions |= exclusion3.bit();
81       }
82     }
83   }
84 
85   RegList list = kJSCallerSaved & ~exclusions;
86   MultiPush(list);
87   bytes += NumRegs(list) * kPointerSize;
88 
89   if (fp_mode == kSaveFPRegs) {
90     MultiPushDoubles(kCallerSavedDoubles);
91     bytes += kNumCallerSavedDoubles * kDoubleSize;
92   }
93 
94   return bytes;
95 }
96 
97 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
98                                    Register exclusion2, Register exclusion3) {
99   int bytes = 0;
100   if (fp_mode == kSaveFPRegs) {
101     MultiPopDoubles(kCallerSavedDoubles);
102     bytes += kNumCallerSavedDoubles * kDoubleSize;
103   }
104 
105   RegList exclusions = 0;
106   if (exclusion1 != no_reg) {
107     exclusions |= exclusion1.bit();
108     if (exclusion2 != no_reg) {
109       exclusions |= exclusion2.bit();
110       if (exclusion3 != no_reg) {
111         exclusions |= exclusion3.bit();
112       }
113     }
114   }
115 
116   RegList list = kJSCallerSaved & ~exclusions;
117   MultiPop(list);
118   bytes += NumRegs(list) * kPointerSize;
119 
120   return bytes;
121 }
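// Illustrative usage sketch (not from this file): the caller-saved helpers
// above are intended to bracket a call that clobbers caller-saved registers,
// e.g.
//   int bytes = PushCallerSaved(kSaveFPRegs, result_reg);
//   ... perform the call ...
//   PopCallerSaved(kSaveFPRegs, result_reg);
// where result_reg stands for a hypothetical register excluded from the
// save/restore.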
122 
123 void TurboAssembler::Jump(Register target) {
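  // mtctr moves the target address into the count register; bctr then
  // branches to CTR without setting the link register, so this is a tail
  // jump rather than a call.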
124   mtctr(target);
125   bctr();
126 }
127 
128 void TurboAssembler::LoadFromConstantsTable(Register destination,
129                                             int constant_index) {
130   DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
131       Heap::kBuiltinsConstantsTableRootIndex));
132 
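  // The builtins constants table is an on-heap FixedArray: element i lives at
  // FixedArray::kHeaderSize + i * kPointerSize from the object start, and
  // kHeapObjectTag is subtracted because the register holds a tagged pointer.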
133   const uint32_t offset =
134       FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
135 
136   CHECK(is_uint19(offset));
137   DCHECK_NE(destination, r0);
138   LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
139   LoadP(destination, MemOperand(destination, offset), r0);
140 }
141 
142 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
143   LoadP(destination, MemOperand(kRootRegister, offset), r0);
144 }
145 
146 void TurboAssembler::LoadRootRegisterOffset(Register destination,
147                                             intptr_t offset) {
148   if (offset == 0) {
149     mr(destination, kRootRegister);
150   } else {
151     addi(destination, kRootRegister, Operand(offset));
152   }
153 }
154 
155 void MacroAssembler::JumpToJSEntry(Register target) {
156   Move(ip, target);
157   Jump(ip);
158 }
159 
160 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
161                           Condition cond, CRegister cr) {
162   Label skip;
163 
164   if (cond != al) b(NegateCondition(cond), &skip, cr);
165 
166   DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
167 
168   mov(ip, Operand(target, rmode));
169   mtctr(ip);
170   bctr();
171 
172   bind(&skip);
173 }
174 
175 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
176                           CRegister cr) {
177   DCHECK(!RelocInfo::IsCodeTarget(rmode));
178   Jump(static_cast<intptr_t>(target), rmode, cond, cr);
179 }
180 
181 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
182                           Condition cond, CRegister cr) {
183   DCHECK(RelocInfo::IsCodeTarget(rmode));
184   // 'code' is always generated PPC code, never THUMB code
185   if (FLAG_embedded_builtins) {
186     if (root_array_available_ && options().isolate_independent_code) {
187       Register scratch = ip;
188       IndirectLoadConstant(scratch, code);
189       addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
190       Label skip;
191       if (cond != al) b(NegateCondition(cond), &skip, cr);
192       Jump(scratch);
193       bind(&skip);
194       return;
195     } else if (options().inline_offheap_trampolines) {
196       int builtin_index = Builtins::kNoBuiltinId;
197       if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
198           Builtins::IsIsolateIndependent(builtin_index)) {
199         // Inline the trampoline.
200         RecordCommentForOffHeapTrampoline(builtin_index);
201         EmbeddedData d = EmbeddedData::FromBlob();
202         Address entry = d.InstructionStartOfBuiltin(builtin_index);
203         // Use ip directly instead of using UseScratchRegisterScope, as we do
204         // not preserve scratch registers across calls.
205         mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
206         Label skip;
207         if (cond != al) b(NegateCondition(cond), &skip, cr);
208         Jump(ip);
209         bind(&skip);
210         return;
211       }
212     }
213   }
214   Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
215 }
216 
217 void TurboAssembler::Call(Register target) {
218   BlockTrampolinePoolScope block_trampoline_pool(this);
219   // Branch via the count register; bctrl sets the LK bit so the return
220   // address is recorded for the callee.
220   mtctr(target);
221   bctrl();
222 }
223 
224 void MacroAssembler::CallJSEntry(Register target) {
225   CHECK(target == r5);
226   Call(target);
227 }
228 
229 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
230                                                    RelocInfo::Mode rmode,
231                                                    Condition cond) {
232   return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
233 }
234 
235 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
236                           Condition cond) {
237   BlockTrampolinePoolScope block_trampoline_pool(this);
238   DCHECK(cond == al);
239 
240   // This can likely be optimized to make use of bc() with a 24-bit relative
241   //
242   // RecordRelocInfo(x.rmode_, x.immediate);
243   // bc( BA, .... offset, LKset);
244   //
245 
246   mov(ip, Operand(target, rmode));
247   mtctr(ip);
248   bctrl();
249 }
250 
251 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
252                           Condition cond) {
253   BlockTrampolinePoolScope block_trampoline_pool(this);
254   DCHECK(RelocInfo::IsCodeTarget(rmode));
255 
256   if (FLAG_embedded_builtins) {
257     if (root_array_available_ && options().isolate_independent_code) {
258       // Use ip directly instead of using UseScratchRegisterScope, as we do not
259       // preserve scratch registers across calls.
260       IndirectLoadConstant(ip, code);
261       addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
262       Label skip;
263       if (cond != al) b(NegateCondition(cond), &skip);
264       Call(ip);
265       bind(&skip);
266       return;
267     } else if (options().inline_offheap_trampolines) {
268       int builtin_index = Builtins::kNoBuiltinId;
269       if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
270           Builtins::IsIsolateIndependent(builtin_index)) {
271         // Inline the trampoline.
272         RecordCommentForOffHeapTrampoline(builtin_index);
273         DCHECK(Builtins::IsBuiltinId(builtin_index));
274         EmbeddedData d = EmbeddedData::FromBlob();
275         Address entry = d.InstructionStartOfBuiltin(builtin_index);
276         // Use ip directly instead of using UseScratchRegisterScope, as we do
277         // not preserve scratch registers across calls.
278         mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
279         Label skip;
280         if (cond != al) b(NegateCondition(cond), &skip);
281         Call(ip);
282         bind(&skip);
283         return;
284       }
285     }
286   }
287   Call(code.address(), rmode, cond);
288 }
289 
290 void TurboAssembler::Drop(int count) {
291   if (count > 0) {
292     Add(sp, sp, count * kPointerSize, r0);
293   }
294 }
295 
296 void TurboAssembler::Drop(Register count, Register scratch) {
297   ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
298   add(sp, sp, scratch);
299 }
300 
301 void TurboAssembler::Call(Label* target) { b(target, SetLK); }
302 
303 void TurboAssembler::Push(Handle<HeapObject> handle) {
304   mov(r0, Operand(handle));
305   push(r0);
306 }
307 
308 void TurboAssembler::Push(Smi* smi) {
309   mov(r0, Operand(smi));
310   push(r0);
311 }
312 
313 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
314   if (FLAG_embedded_builtins) {
315     if (root_array_available_ && options().isolate_independent_code) {
316       IndirectLoadConstant(dst, value);
317       return;
318     }
319   }
320   mov(dst, Operand(value));
321 }
322 
323 void TurboAssembler::Move(Register dst, ExternalReference reference) {
324   if (FLAG_embedded_builtins) {
325     if (root_array_available_ && options().isolate_independent_code) {
326       IndirectLoadExternalReference(dst, reference);
327       return;
328     }
329   }
330   mov(dst, Operand(reference));
331 }
332 
333 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
334   DCHECK(cond == al);
335   if (dst != src) {
336     mr(dst, src);
337   }
338 }
339 
340 void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
341   if (dst != src) {
342     fmr(dst, src);
343   }
344 }
345 
346 void TurboAssembler::MultiPush(RegList regs, Register location) {
347   int16_t num_to_push = base::bits::CountPopulation(regs);
348   int16_t stack_offset = num_to_push * kPointerSize;
349 
350   subi(location, location, Operand(stack_offset));
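  // Store from the highest register code downward so the register with the
  // lowest code ends up at the lowest address; MultiPop below reads them back
  // in the matching order.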
351   for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
352     if ((regs & (1 << i)) != 0) {
353       stack_offset -= kPointerSize;
354       StoreP(ToRegister(i), MemOperand(location, stack_offset));
355     }
356   }
357 }
358 
359 void TurboAssembler::MultiPop(RegList regs, Register location) {
360   int16_t stack_offset = 0;
361 
362   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
363     if ((regs & (1 << i)) != 0) {
364       LoadP(ToRegister(i), MemOperand(location, stack_offset));
365       stack_offset += kPointerSize;
366     }
367   }
368   addi(location, location, Operand(stack_offset));
369 }
370 
371 void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
372   int16_t num_to_push = base::bits::CountPopulation(dregs);
373   int16_t stack_offset = num_to_push * kDoubleSize;
374 
375   subi(location, location, Operand(stack_offset));
376   for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
377     if ((dregs & (1 << i)) != 0) {
378       DoubleRegister dreg = DoubleRegister::from_code(i);
379       stack_offset -= kDoubleSize;
380       stfd(dreg, MemOperand(location, stack_offset));
381     }
382   }
383 }
384 
385 void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
386   int16_t stack_offset = 0;
387 
388   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
389     if ((dregs & (1 << i)) != 0) {
390       DoubleRegister dreg = DoubleRegister::from_code(i);
391       lfd(dreg, MemOperand(location, stack_offset));
392       stack_offset += kDoubleSize;
393     }
394   }
395   addi(location, location, Operand(stack_offset));
396 }
397 
398 void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
399                               Condition cond) {
400   DCHECK(cond == al);
401   LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
402 }
403 
404 void MacroAssembler::RecordWriteField(Register object, int offset,
405                                       Register value, Register dst,
406                                       LinkRegisterStatus lr_status,
407                                       SaveFPRegsMode save_fp,
408                                       RememberedSetAction remembered_set_action,
409                                       SmiCheck smi_check) {
410   // First, check if a write barrier is even needed. The tests below
411   // catch stores of Smis.
412   Label done;
413 
414   // Skip barrier if writing a smi.
415   if (smi_check == INLINE_SMI_CHECK) {
416     JumpIfSmi(value, &done);
417   }
418 
419   // Although the object register is tagged, the offset is relative to the start
420   // of the object, so the offset must be a multiple of kPointerSize.
421   DCHECK(IsAligned(offset, kPointerSize));
422 
423   Add(dst, object, offset - kHeapObjectTag, r0);
424   if (emit_debug_code()) {
425     Label ok;
426     andi(r0, dst, Operand(kPointerSize - 1));
427     beq(&ok, cr0);
428     stop("Unaligned cell in write barrier");
429     bind(&ok);
430   }
431 
432   RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
433               OMIT_SMI_CHECK);
434 
435   bind(&done);
436 
437   // Deliberately clobber the input registers when running with the
438   // debug-code flag turned on, to provoke errors.
439   if (emit_debug_code()) {
440     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
441     mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
442   }
443 }
444 
445 void TurboAssembler::SaveRegisters(RegList registers) {
446   DCHECK_GT(NumRegs(registers), 0);
447   RegList regs = 0;
448   for (int i = 0; i < Register::kNumRegisters; ++i) {
449     if ((registers >> i) & 1u) {
450       regs |= Register::from_code(i).bit();
451     }
452   }
453 
454   MultiPush(regs);
455 }
456 
457 void TurboAssembler::RestoreRegisters(RegList registers) {
458   DCHECK_GT(NumRegs(registers), 0);
459   RegList regs = 0;
460   for (int i = 0; i < Register::kNumRegisters; ++i) {
461     if ((registers >> i) & 1u) {
462       regs |= Register::from_code(i).bit();
463     }
464   }
465   MultiPop(regs);
466 }
467 
468 void TurboAssembler::CallRecordWriteStub(
469     Register object, Register address,
470     RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
471   // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
472   // i.e. always emit remember set and save FP registers in RecordWriteStub. If
473   // large performance regression is observed, we should use these values to
474   // avoid unnecessary work.
475 
476   Callable const callable =
477       Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
478   RegList registers = callable.descriptor().allocatable_registers();
479 
480   SaveRegisters(registers);
481 
482   Register object_parameter(callable.descriptor().GetRegisterParameter(
483       RecordWriteDescriptor::kObject));
484   Register slot_parameter(
485       callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
486   Register isolate_parameter(callable.descriptor().GetRegisterParameter(
487       RecordWriteDescriptor::kIsolate));
488   Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
489       RecordWriteDescriptor::kRememberedSet));
490   Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
491       RecordWriteDescriptor::kFPMode));
492 
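  // Move {object, address} into the descriptor's parameter registers by
  // round-tripping them through the stack; popping in reverse order performs
  // the transfer correctly even if the source registers alias the parameter
  // registers.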
493   push(object);
494   push(address);
495 
496   pop(slot_parameter);
497   pop(object_parameter);
498 
499   Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
500   Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
501   Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
502   Call(callable.code(), RelocInfo::CODE_TARGET);
503 
504   RestoreRegisters(registers);
505 }
506 
507 // Will clobber 4 registers: object, address, scratch, ip.  The
508 // register 'object' contains a heap object pointer.  The heap object
509 // tag is shifted away.
510 void MacroAssembler::RecordWrite(Register object, Register address,
511                                  Register value, LinkRegisterStatus lr_status,
512                                  SaveFPRegsMode fp_mode,
513                                  RememberedSetAction remembered_set_action,
514                                  SmiCheck smi_check) {
515   DCHECK(object != value);
516   if (emit_debug_code()) {
517     LoadP(r0, MemOperand(address));
518     cmp(r0, value);
519     Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
520   }
521 
522   if (remembered_set_action == OMIT_REMEMBERED_SET &&
523       !FLAG_incremental_marking) {
524     return;
525   }
526 
527   // First, check if a write barrier is even needed. The tests below
528   // catch stores of smis and stores into the young generation.
529   Label done;
530 
531   if (smi_check == INLINE_SMI_CHECK) {
532     JumpIfSmi(value, &done);
533   }
534 
535   CheckPageFlag(value,
536                 value,  // Used as scratch.
537                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
538   CheckPageFlag(object,
539                 value,  // Used as scratch.
540                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
541 
542   // Record the actual write.
543   if (lr_status == kLRHasNotBeenSaved) {
544     mflr(r0);
545     push(r0);
546   }
547   CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
548   if (lr_status == kLRHasNotBeenSaved) {
549     pop(r0);
550     mtlr(r0);
551   }
552 
553   bind(&done);
554 
555   // Count number of write barriers in generated code.
556   isolate()->counters()->write_barriers_static()->Increment();
557   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
558                    value);
559 
560   // Deliberately clobber the address and value registers when running with
561   // the debug-code flag turned on, to provoke errors.
562   if (emit_debug_code()) {
563     mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
564     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
565   }
566 }
567 
568 void TurboAssembler::PushCommonFrame(Register marker_reg) {
569   int fp_delta = 0;
570   mflr(r0);
571   if (FLAG_enable_embedded_constant_pool) {
572     if (marker_reg.is_valid()) {
573       Push(r0, fp, kConstantPoolRegister, marker_reg);
574       fp_delta = 2;
575     } else {
576       Push(r0, fp, kConstantPoolRegister);
577       fp_delta = 1;
578     }
579   } else {
580     if (marker_reg.is_valid()) {
581       Push(r0, fp, marker_reg);
582       fp_delta = 1;
583     } else {
584       Push(r0, fp);
585       fp_delta = 0;
586     }
587   }
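  // fp_delta is the number of slots between the new sp and the slot holding
  // the saved fp, so the addi below leaves fp pointing at the saved-fp slot of
  // the new frame.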
588   addi(fp, sp, Operand(fp_delta * kPointerSize));
589 }
590 
591 void TurboAssembler::PushStandardFrame(Register function_reg) {
592   int fp_delta = 0;
593   mflr(r0);
594   if (FLAG_enable_embedded_constant_pool) {
595     if (function_reg.is_valid()) {
596       Push(r0, fp, kConstantPoolRegister, cp, function_reg);
597       fp_delta = 3;
598     } else {
599       Push(r0, fp, kConstantPoolRegister, cp);
600       fp_delta = 2;
601     }
602   } else {
603     if (function_reg.is_valid()) {
604       Push(r0, fp, cp, function_reg);
605       fp_delta = 2;
606     } else {
607       Push(r0, fp, cp);
608       fp_delta = 1;
609     }
610   }
611   addi(fp, sp, Operand(fp_delta * kPointerSize));
612 }
613 
614 void TurboAssembler::RestoreFrameStateForTailCall() {
615   if (FLAG_enable_embedded_constant_pool) {
616     LoadP(kConstantPoolRegister,
617           MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
618     set_constant_pool_available(false);
619   }
620   LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
621   LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
622   mtlr(r0);
623 }
624 
625 // Push and pop all registers that can hold pointers.
626 void MacroAssembler::PushSafepointRegisters() {
627   // Safepoints expect a block of kNumSafepointRegisters values on the
628   // stack, so adjust the stack for unsaved registers.
629   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
630   DCHECK_GE(num_unsaved, 0);
631   if (num_unsaved > 0) {
632     subi(sp, sp, Operand(num_unsaved * kPointerSize));
633   }
634   MultiPush(kSafepointSavedRegisters);
635 }
636 
637 
638 void MacroAssembler::PopSafepointRegisters() {
639   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
640   MultiPop(kSafepointSavedRegisters);
641   if (num_unsaved > 0) {
642     addi(sp, sp, Operand(num_unsaved * kPointerSize));
643   }
644 }
645 
646 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
647   // The registers are pushed starting with the highest encoding,
648   // which means that lowest encodings are closest to the stack pointer.
649   RegList regs = kSafepointSavedRegisters;
650   int index = 0;
651 
652   DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
653 
654   for (int16_t i = 0; i < reg_code; i++) {
655     if ((regs & (1 << i)) != 0) {
656       index++;
657     }
658   }
659 
660   return index;
661 }
662 
663 
664 void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
665                                      const DoubleRegister src) {
666   // Turn potential sNaN into qNaN.
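  // Subtracting zero leaves ordinary values (including -0.0) unchanged, while
  // any arithmetic on a signalling NaN delivers the corresponding quiet NaN.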
667   fsub(dst, src, kDoubleRegZero);
668 }
669 
670 void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
671   MovIntToDouble(dst, src, r0);
672   fcfid(dst, dst);
673 }
674 
675 void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
676                                                 DoubleRegister dst) {
677   MovUnsignedIntToDouble(dst, src, r0);
678   fcfid(dst, dst);
679 }
680 
681 void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
682   MovIntToDouble(dst, src, r0);
683   fcfids(dst, dst);
684 }
685 
686 void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
687                                                DoubleRegister dst) {
688   MovUnsignedIntToDouble(dst, src, r0);
689   fcfids(dst, dst);
690 }
691 
692 #if V8_TARGET_ARCH_PPC64
693 void TurboAssembler::ConvertInt64ToDouble(Register src,
694                                           DoubleRegister double_dst) {
695   MovInt64ToDouble(double_dst, src);
696   fcfid(double_dst, double_dst);
697 }
698 
699 void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
700                                                  DoubleRegister double_dst) {
701   MovInt64ToDouble(double_dst, src);
702   fcfidus(double_dst, double_dst);
703 }
704 
705 void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
706                                                   DoubleRegister double_dst) {
707   MovInt64ToDouble(double_dst, src);
708   fcfidu(double_dst, double_dst);
709 }
710 
711 void TurboAssembler::ConvertInt64ToFloat(Register src,
712                                          DoubleRegister double_dst) {
713   MovInt64ToDouble(double_dst, src);
714   fcfids(double_dst, double_dst);
715 }
716 #endif
717 
718 void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
719 #if !V8_TARGET_ARCH_PPC64
720                                           const Register dst_hi,
721 #endif
722                                           const Register dst,
723                                           const DoubleRegister double_dst,
724                                           FPRoundingMode rounding_mode) {
725   if (rounding_mode == kRoundToZero) {
726     fctidz(double_dst, double_input);
727   } else {
728     SetRoundingMode(rounding_mode);
729     fctid(double_dst, double_input);
730     ResetRoundingMode();
731   }
732 
733   MovDoubleToInt64(
734 #if !V8_TARGET_ARCH_PPC64
735       dst_hi,
736 #endif
737       dst, double_dst);
738 }
739 
740 #if V8_TARGET_ARCH_PPC64
741 void TurboAssembler::ConvertDoubleToUnsignedInt64(
742     const DoubleRegister double_input, const Register dst,
743     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
744   if (rounding_mode == kRoundToZero) {
745     fctiduz(double_dst, double_input);
746   } else {
747     SetRoundingMode(rounding_mode);
748     fctidu(double_dst, double_input);
749     ResetRoundingMode();
750   }
751 
752   MovDoubleToInt64(dst, double_dst);
753 }
754 #endif
755 
756 #if !V8_TARGET_ARCH_PPC64
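// The pair helpers below implement 64-bit shifts on 32-bit register pairs.
// For a left shift by 0 < s < 32:
//   high = (src_high << s) | (src_low >> (32 - s)),  low = src_low << s
// and for s >= 32:
//   high = src_low << (s & 31),  low = 0.
// The logical and arithmetic right-shift variants mirror this scheme.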
757 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
758                                    Register src_low, Register src_high,
759                                    Register scratch, Register shift) {
760   DCHECK(!AreAliased(dst_low, src_high));
761   DCHECK(!AreAliased(dst_high, src_low));
762   DCHECK(!AreAliased(dst_low, dst_high, shift));
763   Label less_than_32;
764   Label done;
765   cmpi(shift, Operand(32));
766   blt(&less_than_32);
767   // If shift >= 32
768   andi(scratch, shift, Operand(0x1F));
769   slw(dst_high, src_low, scratch);
770   li(dst_low, Operand::Zero());
771   b(&done);
772   bind(&less_than_32);
773   // If shift < 32
774   subfic(scratch, shift, Operand(32));
775   slw(dst_high, src_high, shift);
776   srw(scratch, src_low, scratch);
777   orx(dst_high, dst_high, scratch);
778   slw(dst_low, src_low, shift);
779   bind(&done);
780 }
781 
782 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
783                                    Register src_low, Register src_high,
784                                    uint32_t shift) {
785   DCHECK(!AreAliased(dst_low, src_high));
786   DCHECK(!AreAliased(dst_high, src_low));
787   if (shift == 32) {
788     Move(dst_high, src_low);
789     li(dst_low, Operand::Zero());
790   } else if (shift > 32) {
791     shift &= 0x1F;
792     slwi(dst_high, src_low, Operand(shift));
793     li(dst_low, Operand::Zero());
794   } else if (shift == 0) {
795     Move(dst_low, src_low);
796     Move(dst_high, src_high);
797   } else {
798     slwi(dst_high, src_high, Operand(shift));
799     rlwimi(dst_high, src_low, shift, 32 - shift, 31);
800     slwi(dst_low, src_low, Operand(shift));
801   }
802 }
803 
804 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
805                                     Register src_low, Register src_high,
806                                     Register scratch, Register shift) {
807   DCHECK(!AreAliased(dst_low, src_high));
808   DCHECK(!AreAliased(dst_high, src_low));
809   DCHECK(!AreAliased(dst_low, dst_high, shift));
810   Label less_than_32;
811   Label done;
812   cmpi(shift, Operand(32));
813   blt(&less_than_32);
814   // If shift >= 32
815   andi(scratch, shift, Operand(0x1F));
816   srw(dst_low, src_high, scratch);
817   li(dst_high, Operand::Zero());
818   b(&done);
819   bind(&less_than_32);
820   // If shift < 32
821   subfic(scratch, shift, Operand(32));
822   srw(dst_low, src_low, shift);
823   slw(scratch, src_high, scratch);
824   orx(dst_low, dst_low, scratch);
825   srw(dst_high, src_high, shift);
826   bind(&done);
827 }
828 
829 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
830                                     Register src_low, Register src_high,
831                                     uint32_t shift) {
832   DCHECK(!AreAliased(dst_low, src_high));
833   DCHECK(!AreAliased(dst_high, src_low));
834   if (shift == 32) {
835     Move(dst_low, src_high);
836     li(dst_high, Operand::Zero());
837   } else if (shift > 32) {
838     shift &= 0x1F;
839     srwi(dst_low, src_high, Operand(shift));
840     li(dst_high, Operand::Zero());
841   } else if (shift == 0) {
842     Move(dst_low, src_low);
843     Move(dst_high, src_high);
844   } else {
845     srwi(dst_low, src_low, Operand(shift));
846     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
847     srwi(dst_high, src_high, Operand(shift));
848   }
849 }
850 
851 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
852                                        Register src_low, Register src_high,
853                                        Register scratch, Register shift) {
854   DCHECK(!AreAliased(dst_low, src_high, shift));
855   DCHECK(!AreAliased(dst_high, src_low, shift));
856   Label less_than_32;
857   Label done;
858   cmpi(shift, Operand(32));
859   blt(&less_than_32);
860   // If shift >= 32
861   andi(scratch, shift, Operand(0x1F));
862   sraw(dst_low, src_high, scratch);
863   srawi(dst_high, src_high, 31);
864   b(&done);
865   bind(&less_than_32);
866   // If shift < 32
867   subfic(scratch, shift, Operand(32));
868   srw(dst_low, src_low, shift);
869   slw(scratch, src_high, scratch);
870   orx(dst_low, dst_low, scratch);
871   sraw(dst_high, src_high, shift);
872   bind(&done);
873 }
874 
875 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
876                                        Register src_low, Register src_high,
877                                        uint32_t shift) {
878   DCHECK(!AreAliased(dst_low, src_high));
879   DCHECK(!AreAliased(dst_high, src_low));
880   if (shift == 32) {
881     Move(dst_low, src_high);
882     srawi(dst_high, src_high, 31);
883   } else if (shift > 32) {
884     shift &= 0x1F;
885     srawi(dst_low, src_high, shift);
886     srawi(dst_high, src_high, 31);
887   } else if (shift == 0) {
888     Move(dst_low, src_low);
889     Move(dst_high, src_high);
890   } else {
891     srwi(dst_low, src_low, Operand(shift));
892     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
893     srawi(dst_high, src_high, shift);
894   }
895 }
896 #endif
897 
898 void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
899     Register code_target_address) {
900   lwz(kConstantPoolRegister,
901       MemOperand(code_target_address,
902                  Code::kConstantPoolOffset - Code::kHeaderSize));
903   add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
904 }
905 
906 void TurboAssembler::LoadPC(Register dst) {
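  // b(4, SetLK) branches to the next instruction and records that
  // instruction's address in LR; mflr then copies it into dst, so dst ends up
  // holding the address of the mflr itself (the current PC).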
907   b(4, SetLK);
908   mflr(dst);
909 }
910 
911 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
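  // LR is preserved in r0 because LoadPC clobbers it. After LoadPC, dst holds
  // the address of the mflr it emitted, which sits pc_offset() - kInstrSize
  // bytes into the generated code when the subi is assembled, so the
  // subtraction yields the code start address.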
912   mflr(r0);
913   LoadPC(dst);
914   subi(dst, dst, Operand(pc_offset() - kInstrSize));
915   mtlr(r0);
916 }
917 
918 void TurboAssembler::LoadConstantPoolPointerRegister() {
919   LoadPC(kConstantPoolRegister);
920   int32_t delta = -pc_offset() + 4;
921   add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
922                    ConstantPoolPosition(), delta);
923 }
924 
925 void TurboAssembler::StubPrologue(StackFrame::Type type) {
926   {
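    // The frame-type marker is pushed inside a ConstantPoolUnavailableScope
    // because kConstantPoolRegister has not been set up for this frame yet.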
927     ConstantPoolUnavailableScope constant_pool_unavailable(this);
928     mov(r11, Operand(StackFrame::TypeToMarker(type)));
929     PushCommonFrame(r11);
930   }
931   if (FLAG_enable_embedded_constant_pool) {
932     LoadConstantPoolPointerRegister();
933     set_constant_pool_available(true);
934   }
935 }
936 
937 void TurboAssembler::Prologue() {
938   PushStandardFrame(r4);
939   if (FLAG_enable_embedded_constant_pool) {
940     // base contains prologue address
941     LoadConstantPoolPointerRegister();
942     set_constant_pool_available(true);
943   }
944 }
945 
946 void TurboAssembler::EnterFrame(StackFrame::Type type,
947                                 bool load_constant_pool_pointer_reg) {
948   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
949     // Push type explicitly so we can leverage the constant pool.
950     // This path cannot rely on ip containing code entry.
951     PushCommonFrame();
952     LoadConstantPoolPointerRegister();
953     mov(ip, Operand(StackFrame::TypeToMarker(type)));
954     push(ip);
955   } else {
956     mov(ip, Operand(StackFrame::TypeToMarker(type)));
957     PushCommonFrame(ip);
958   }
959 }
960 
961 int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
962   ConstantPoolUnavailableScope constant_pool_unavailable(this);
963   // r3: preserved
964   // r4: preserved
965   // r5: preserved
966 
967   // Drop the execution stack down to the frame pointer and restore
968   // the caller's state.
969   int frame_ends;
970   LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
971   LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
972   if (FLAG_enable_embedded_constant_pool) {
973     LoadP(kConstantPoolRegister,
974           MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
975   }
976   mtlr(r0);
977   frame_ends = pc_offset();
978   Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
979   mr(fp, ip);
980   return frame_ends;
981 }
982 
983 // ExitFrame layout (possibly out of date; needs updating)
984 //
985 //  SP -> previousSP
986 //        LK reserved
987 //        code
988 //        sp_on_exit (for debug?)
989 // oldSP->prev SP
990 //        LK
991 //        <parameters on stack>
992 
993 // Prior to calling EnterExitFrame, we've got a bunch of parameters
994 // on the stack that we need to wrap a real frame around, so first
995 // we reserve a slot for LK and push the previous SP, which is captured
996 // in the fp register (r31).
997 // Then we allocate a new frame.
998 
999 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1000                                     StackFrame::Type frame_type) {
1001   DCHECK(frame_type == StackFrame::EXIT ||
1002          frame_type == StackFrame::BUILTIN_EXIT);
1003   // Set up the frame structure on the stack.
1004   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1005   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1006   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1007   DCHECK_GT(stack_space, 0);
1008 
1009   // This is an opportunity to build a frame to wrap
1010   // all of the pushes that have happened inside of V8
1011   // since we were called from C code
1012 
1013   mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1014   PushCommonFrame(ip);
1015   // Reserve room for saved entry sp and code object.
1016   subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1017 
1018   if (emit_debug_code()) {
1019     li(r8, Operand::Zero());
1020     StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1021   }
1022   if (FLAG_enable_embedded_constant_pool) {
1023     StoreP(kConstantPoolRegister,
1024            MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1025   }
1026   Move(r8, CodeObject());
1027   StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1028 
1029   // Save the frame pointer and the context in top.
1030   Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1031                                      isolate()));
1032   StoreP(fp, MemOperand(r8));
1033   Move(r8,
1034        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1035   StoreP(cp, MemOperand(r8));
1036 
1037   // Optionally save all volatile double registers.
1038   if (save_doubles) {
1039     MultiPushDoubles(kCallerSavedDoubles);
1040     // Note that d0 will be accessible at
1041     //   fp - ExitFrameConstants::kFrameSize -
1042     //   kNumCallerSavedDoubles * kDoubleSize,
1043     // since the sp slot and code slot were pushed after the fp.
1044   }
1045 
1046   addi(sp, sp, Operand(-stack_space * kPointerSize));
1047 
1048   // Allocate and align the frame preparing for calling the runtime
1049   // function.
1050   const int frame_alignment = ActivationFrameAlignment();
1051   if (frame_alignment > kPointerSize) {
1052     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1053     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1054   }
1055   li(r0, Operand::Zero());
1056   StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1057 
1058   // Set the exit frame sp value to point just before the return address
1059   // location.
1060   addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
1061   StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1062 }
1063 
1064 int TurboAssembler::ActivationFrameAlignment() {
1065 #if !defined(USE_SIMULATOR)
1066   // Running on the real platform. Use the alignment as mandated by the local
1067   // environment.
1068   // Note: This will break if we ever start generating snapshots on one PPC
1069   // platform for another PPC platform with a different alignment.
1070   return base::OS::ActivationFrameAlignment();
1071 #else  // Simulated
1072   // If we are using the simulator then we should always align to the expected
1073   // alignment. As the simulator is used to generate snapshots we do not know
1074   // if the target platform will need alignment, so this is controlled from a
1075   // flag.
1076   return FLAG_sim_stack_alignment;
1077 #endif
1078 }
1079 
1080 
1081 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1082                                     bool argument_count_is_length) {
1083   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1084   // Optionally restore all double registers.
1085   if (save_doubles) {
1086     // Calculate the stack location of the saved doubles and restore them.
1087     const int kNumRegs = kNumCallerSavedDoubles;
1088     const int offset =
1089         (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
1090     addi(r6, fp, Operand(-offset));
1091     MultiPopDoubles(kCallerSavedDoubles, r6);
1092   }
1093 
1094   // Clear top frame.
1095   li(r6, Operand::Zero());
1096   Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1097                                      isolate()));
1098   StoreP(r6, MemOperand(ip));
1099 
1100   // Restore current context from top and clear it in debug mode.
1101   Move(ip,
1102        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1103   LoadP(cp, MemOperand(ip));
1104 
1105 #ifdef DEBUG
1106   mov(r6, Operand(Context::kInvalidContext));
1107   Move(ip,
1108        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1109   StoreP(r6, MemOperand(ip));
1110 #endif
1111 
1112   // Tear down the exit frame, pop the arguments, and return.
1113   LeaveFrame(StackFrame::EXIT);
1114 
1115   if (argument_count.is_valid()) {
1116     if (!argument_count_is_length) {
1117       ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
1118     }
1119     add(sp, sp, argument_count);
1120   }
1121 }
1122 
1123 void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
1124   Move(dst, d1);
1125 }
1126 
1127 void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1128   Move(dst, d1);
1129 }
1130 
1131 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1132                                         Register caller_args_count_reg,
1133                                         Register scratch0, Register scratch1) {
1134 #if DEBUG
1135   if (callee_args_count.is_reg()) {
1136     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1137                        scratch1));
1138   } else {
1139     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1140   }
1141 #endif
1142 
1143   // Calculate the end of the destination area where we will put the arguments
1144   // after we drop the current frame. We add kPointerSize to count the receiver
1145   // argument, which is not included in the formal parameter count.
1146   Register dst_reg = scratch0;
1147   ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1148   add(dst_reg, fp, dst_reg);
1149   addi(dst_reg, dst_reg,
1150        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1151 
1152   Register src_reg = caller_args_count_reg;
1153   // Calculate the end of source area. +kPointerSize is for the receiver.
1154   if (callee_args_count.is_reg()) {
1155     ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1156     add(src_reg, sp, src_reg);
1157     addi(src_reg, src_reg, Operand(kPointerSize));
1158   } else {
1159     Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
1160   }
1161 
1162   if (FLAG_debug_code) {
1163     cmpl(src_reg, dst_reg);
1164     Check(lt, AbortReason::kStackAccessBelowStackPointer);
1165   }
1166 
1167   // Restore caller's frame pointer and return address now as they will be
1168   // overwritten by the copying loop.
1169   RestoreFrameStateForTailCall();
1170 
1171   // Now copy callee arguments to the caller frame going backwards to avoid
1172   // callee arguments corruption (source and destination areas could overlap).
1173 
1174   // Both src_reg and dst_reg are pointing to the word after the one to copy,
1175   // so they must be pre-decremented in the loop.
1176   Register tmp_reg = scratch1;
1177   Label loop;
1178   if (callee_args_count.is_reg()) {
1179     addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
1180   } else {
1181     mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1182   }
1183   mtctr(tmp_reg);
1184   bind(&loop);
1185   LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
1186   StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1187   bdnz(&loop);
1188 
1189   // Leave current frame.
1190   mr(sp, dst_reg);
1191 }
1192 
1193 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1194                                     const ParameterCount& actual, Label* done,
1195                                     bool* definitely_mismatches,
1196                                     InvokeFlag flag) {
1197   bool definitely_matches = false;
1198   *definitely_mismatches = false;
1199   Label regular_invoke;
1200 
1201   // Check whether the expected and actual arguments count match. If not,
1202   // setup registers according to contract with ArgumentsAdaptorTrampoline:
1203   //  r3: actual arguments count
1204   //  r4: function (passed through to callee)
1205   //  r5: expected arguments count
1206 
1207   // The code below is made a lot easier because the calling code already sets
1208   // up actual and expected registers according to the contract if values are
1209   // passed in registers.
1210 
1211   // ARM has some sanity checks as per below; consider adding them for PPC:
1212   //  DCHECK(actual.is_immediate() || actual.reg() == r3);
1213   //  DCHECK(expected.is_immediate() || expected.reg() == r5);
1214 
1215   if (expected.is_immediate()) {
1216     DCHECK(actual.is_immediate());
1217     mov(r3, Operand(actual.immediate()));
1218     if (expected.immediate() == actual.immediate()) {
1219       definitely_matches = true;
1220     } else {
1221       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1222       if (expected.immediate() == sentinel) {
1223         // Don't worry about adapting arguments for builtins that
1224   // don't want that done. Skip adaptation code by making it look
1225         // like we have a match between expected and actual number of
1226         // arguments.
1227         definitely_matches = true;
1228       } else {
1229         *definitely_mismatches = true;
1230         mov(r5, Operand(expected.immediate()));
1231       }
1232     }
1233   } else {
1234     if (actual.is_immediate()) {
1235       mov(r3, Operand(actual.immediate()));
1236       cmpi(expected.reg(), Operand(actual.immediate()));
1237       beq(&regular_invoke);
1238     } else {
1239       cmp(expected.reg(), actual.reg());
1240       beq(&regular_invoke);
1241     }
1242   }
1243 
1244   if (!definitely_matches) {
1245     Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1246     if (flag == CALL_FUNCTION) {
1247       Call(adaptor);
1248       if (!*definitely_mismatches) {
1249         b(done);
1250       }
1251     } else {
1252       Jump(adaptor, RelocInfo::CODE_TARGET);
1253     }
1254     bind(&regular_invoke);
1255   }
1256 }
1257 
1258 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1259                                     const ParameterCount& expected,
1260                                     const ParameterCount& actual) {
1261   Label skip_hook;
1262 
1263   ExternalReference debug_hook_active =
1264       ExternalReference::debug_hook_on_function_call_address(isolate());
1265   Move(r7, debug_hook_active);
1266   LoadByte(r7, MemOperand(r7), r0);
1267   extsb(r7, r7);
1268   CmpSmiLiteral(r7, Smi::kZero, r0);
1269   beq(&skip_hook);
1270 
1271   {
1272     // Load receiver to pass it later to DebugOnFunctionCall hook.
1273     if (actual.is_reg()) {
1274       mr(r7, actual.reg());
1275     } else {
1276       mov(r7, Operand(actual.immediate()));
1277     }
1278     ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
1279     LoadPX(r7, MemOperand(sp, r7));
1280     FrameScope frame(this,
1281                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1282     if (expected.is_reg()) {
1283       SmiTag(expected.reg());
1284       Push(expected.reg());
1285     }
1286     if (actual.is_reg()) {
1287       SmiTag(actual.reg());
1288       Push(actual.reg());
1289     }
1290     if (new_target.is_valid()) {
1291       Push(new_target);
1292     }
1293     Push(fun, fun, r7);
1294     CallRuntime(Runtime::kDebugOnFunctionCall);
1295     Pop(fun);
1296     if (new_target.is_valid()) {
1297       Pop(new_target);
1298     }
1299     if (actual.is_reg()) {
1300       Pop(actual.reg());
1301       SmiUntag(actual.reg());
1302     }
1303     if (expected.is_reg()) {
1304       Pop(expected.reg());
1305       SmiUntag(expected.reg());
1306     }
1307   }
1308   bind(&skip_hook);
1309 }
1310 
1311 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1312                                         const ParameterCount& expected,
1313                                         const ParameterCount& actual,
1314                                         InvokeFlag flag) {
1315   // You can't call a function without a valid frame.
1316   DCHECK(flag == JUMP_FUNCTION || has_frame());
1317   DCHECK(function == r4);
1318   DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1319 
1320   // On function call, call into the debugger if necessary.
1321   CheckDebugHook(function, new_target, expected, actual);
1322 
1323   // Clear the new.target register if not given.
1324   if (!new_target.is_valid()) {
1325     LoadRoot(r6, Heap::kUndefinedValueRootIndex);
1326   }
1327 
1328   Label done;
1329   bool definitely_mismatches = false;
1330   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
1331   if (!definitely_mismatches) {
1332     // We call indirectly through the code field in the function to
1333     // allow recompilation to take effect without changing any of the
1334     // call sites.
1335     Register code = kJavaScriptCallCodeStartRegister;
1336     LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1337     addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1338     if (flag == CALL_FUNCTION) {
1339       CallJSEntry(code);
1340     } else {
1341       DCHECK(flag == JUMP_FUNCTION);
1342       JumpToJSEntry(code);
1343     }
1344 
1345     // Continue here if InvokePrologue does handle the invocation due to
1346     // mismatched parameter counts.
1347     bind(&done);
1348   }
1349 }
1350 
1351 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1352                                     const ParameterCount& actual,
1353                                     InvokeFlag flag) {
1354   // You can't call a function without a valid frame.
1355   DCHECK(flag == JUMP_FUNCTION || has_frame());
1356 
1357   // Contract with called JS functions requires that function is passed in r4.
1358   DCHECK(fun == r4);
1359 
1360   Register expected_reg = r5;
1361   Register temp_reg = r7;
1362 
1363   LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1364   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1365   LoadHalfWord(expected_reg,
1366                FieldMemOperand(
1367                    temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1368 
1369   ParameterCount expected(expected_reg);
1370   InvokeFunctionCode(fun, new_target, expected, actual, flag);
1371 }
1372 
1373 void MacroAssembler::InvokeFunction(Register function,
1374                                     const ParameterCount& expected,
1375                                     const ParameterCount& actual,
1376                                     InvokeFlag flag) {
1377   // You can't call a function without a valid frame.
1378   DCHECK(flag == JUMP_FUNCTION || has_frame());
1379 
1380   // Contract with called JS functions requires that function is passed in r4.
1381   DCHECK(function == r4);
1382 
1383   // Get the function and setup the context.
1384   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1385 
1386   InvokeFunctionCode(r4, no_reg, expected, actual, flag);
1387 }
1388 
1389 void MacroAssembler::MaybeDropFrames() {
1390   // Check whether we need to drop frames to restart a function on the stack.
1391   ExternalReference restart_fp =
1392       ExternalReference::debug_restart_fp_address(isolate());
1393   Move(r4, restart_fp);
1394   LoadP(r4, MemOperand(r4));
1395   cmpi(r4, Operand::Zero());
1396   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1397        ne);
1398 }
1399 
1400 void MacroAssembler::PushStackHandler() {
1401   // Adjust this code if the handler layout asserted below changes.
1402   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1403   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1404 
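  // The handler occupies two pointer-sized slots: the padding word pushed
  // below and, at kNextOffset, the link to the previous handler.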
1405   Push(Smi::kZero);  // Padding.
1406 
1407   // Link the current handler as the next handler.
1408   // Preserve r3-r7.
1409   mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1410                                             isolate())));
1411   LoadP(r0, MemOperand(r8));
1412   push(r0);
1413 
1414   // Set this new handler as the current one.
1415   StoreP(sp, MemOperand(r8));
1416 }
1417 
1418 
1419 void MacroAssembler::PopStackHandler() {
1420   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1421   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1422 
1423   pop(r4);
1424   mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1425                                             isolate())));
1426   StoreP(r4, MemOperand(ip));
1427 
1428   Drop(1);  // Drop padding.
1429 }
1430 
1431 
1432 void MacroAssembler::CompareObjectType(Register object, Register map,
1433                                        Register type_reg, InstanceType type) {
1434   const Register temp = type_reg == no_reg ? r0 : type_reg;
1435 
1436   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1437   CompareInstanceType(map, temp, type);
1438 }
1439 
1440 
1441 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1442                                          InstanceType type) {
1443   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1444   STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1445   lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1446   cmpi(type_reg, Operand(type));
1447 }
1448 
1449 
1450 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1451   DCHECK(obj != r0);
1452   LoadRoot(r0, index);
1453   cmp(obj, r0);
1454 }
1455 
1456 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1457                                             Register right,
1458                                             Register overflow_dst,
1459                                             Register scratch) {
1460   DCHECK(dst != overflow_dst);
1461   DCHECK(dst != scratch);
1462   DCHECK(overflow_dst != scratch);
1463   DCHECK(overflow_dst != left);
1464   DCHECK(overflow_dst != right);
1465 
1466   bool left_is_right = left == right;
1467   RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1468 
1469   // C = A+B; C overflows if A/B have same sign and C has diff sign than A
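  // Sign-bit trick: (dst ^ left) & (dst ^ right) is negative exactly when both
  // operands share a sign and the sum's sign differs, i.e. on signed overflow.
  // The SetRC on the final xor_/and_ latches that sign bit into cr0.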
1470   if (dst == left) {
1471     mr(scratch, left);            // Preserve left.
1472     add(dst, left, right);        // Left is overwritten.
1473     xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
1474     if (!left_is_right) xor_(scratch, dst, right);
1475   } else if (dst == right) {
1476     mr(scratch, right);           // Preserve right.
1477     add(dst, left, right);        // Right is overwritten.
1478     xor_(overflow_dst, dst, left, xorRC);
1479     if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
1480   } else {
1481     add(dst, left, right);
1482     xor_(overflow_dst, dst, left, xorRC);
1483     if (!left_is_right) xor_(scratch, dst, right);
1484   }
1485   if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1486 }
1487 
1488 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1489                                             intptr_t right,
1490                                             Register overflow_dst,
1491                                             Register scratch) {
1492   Register original_left = left;
1493   DCHECK(dst != overflow_dst);
1494   DCHECK(dst != scratch);
1495   DCHECK(overflow_dst != scratch);
1496   DCHECK(overflow_dst != left);
1497 
1498   // C = A+B; C overflows if A/B have same sign and C has diff sign than A
1499   if (dst == left) {
1500     // Preserve left.
1501     original_left = overflow_dst;
1502     mr(original_left, left);
1503   }
1504   Add(dst, left, right, scratch);
1505   xor_(overflow_dst, dst, original_left);
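  // With an immediate of known sign only one check is needed: for right >= 0,
  // overflow also requires a negative result (and_ with dst); for right < 0 it
  // requires a non-negative result (andc masks out dst's sign bit).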
1506   if (right >= 0) {
1507     and_(overflow_dst, overflow_dst, dst, SetRC);
1508   } else {
1509     andc(overflow_dst, overflow_dst, dst, SetRC);
1510   }
1511 }
1512 
1513 void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
1514                                             Register right,
1515                                             Register overflow_dst,
1516                                             Register scratch) {
1517   DCHECK(dst != overflow_dst);
1518   DCHECK(dst != scratch);
1519   DCHECK(overflow_dst != scratch);
1520   DCHECK(overflow_dst != left);
1521   DCHECK(overflow_dst != right);
1522 
1523   // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
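  // Equivalently, (dst ^ left) & (left ^ right) is negative exactly when the
  // operands' signs differ and the result's sign differs from left's.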
1524   if (dst == left) {
1525     mr(scratch, left);      // Preserve left.
1526     sub(dst, left, right);  // Left is overwritten.
1527     xor_(overflow_dst, dst, scratch);
1528     xor_(scratch, scratch, right);
1529     and_(overflow_dst, overflow_dst, scratch, SetRC);
1530   } else if (dst == right) {
1531     mr(scratch, right);     // Preserve right.
1532     sub(dst, left, right);  // Right is overwritten.
1533     xor_(overflow_dst, dst, left);
1534     xor_(scratch, left, scratch);
1535     and_(overflow_dst, overflow_dst, scratch, SetRC);
1536   } else {
1537     sub(dst, left, right);
1538     xor_(overflow_dst, dst, left);
1539     xor_(scratch, left, right);
1540     and_(overflow_dst, scratch, overflow_dst, SetRC);
1541   }
1542 }
1543 
1544 
1545 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1546   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1547   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1548 }
1549 
1550 void TurboAssembler::CallStubDelayed(CodeStub* stub) {
1551   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1552 
1553   // Block constant pool for the call instruction sequence.
1554   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1555 
1556   mov(ip, Operand::EmbeddedCode(stub));
1557   mtctr(ip);
1558   bctrl();
1559 }
1560 
1561 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1562   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1563 }
1564 
1565 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
1566   return has_frame_ || !stub->SometimesSetsUpAFrame();
1567 }
1568 
1569 void MacroAssembler::TryDoubleToInt32Exact(Register result,
1570                                            DoubleRegister double_input,
1571                                            Register scratch,
1572                                            DoubleRegister double_scratch) {
1573   Label done;
1574   DCHECK(double_input != double_scratch);
1575 
1576   ConvertDoubleToInt64(double_input,
1577 #if !V8_TARGET_ARCH_PPC64
1578                        scratch,
1579 #endif
1580                        result, double_scratch);
1581 
1582 #if V8_TARGET_ARCH_PPC64
1583   TestIfInt32(result, r0);
1584 #else
1585   TestIfInt32(scratch, result, r0);
1586 #endif
1587   bne(&done);
1588 
1589   // Convert back and compare; the caller tests the resulting condition flags.
1590   fcfid(double_scratch, double_scratch);
1591   fcmpu(double_scratch, double_input);
1592   bind(&done);
1593 }
1594 
1595 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1596                                        Register result,
1597                                        DoubleRegister double_input,
1598                                        StubCallMode stub_mode) {
1599   Label done;
1600 
1601   TryInlineTruncateDoubleToI(result, double_input, &done);
1602 
1603   // If we fell through, the inline version didn't succeed, so call the stub instead.
1604   mflr(r0);
1605   push(r0);
1606   // Put input on stack.
1607   stfdu(double_input, MemOperand(sp, -kDoubleSize));
1608 
1609   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1610     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1611   } else {
1612     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1613   }
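  // Both the wasm stub and the DoubleToI builtin take their input from the
  // stack slot pushed above and leave the truncated result in the same slot,
  // which is why it is reloaded from sp below.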
1614 
1615   LoadP(result, MemOperand(sp));
1616   addi(sp, sp, Operand(kDoubleSize));
1617   pop(r0);
1618   mtlr(r0);
1619 
1620   bind(&done);
1621 }
1622 
1623 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1624                                                 DoubleRegister double_input,
1625                                                 Label* done) {
1626   DoubleRegister double_scratch = kScratchDoubleReg;
1627 #if !V8_TARGET_ARCH_PPC64
1628   Register scratch = ip;
1629 #endif
1630 
1631   ConvertDoubleToInt64(double_input,
1632 #if !V8_TARGET_ARCH_PPC64
1633                        scratch,
1634 #endif
1635                        result, double_scratch);
1636 
1637 // Test for overflow
1638 #if V8_TARGET_ARCH_PPC64
1639   TestIfInt32(result, r0);
1640 #else
1641   TestIfInt32(scratch, result, r0);
1642 #endif
1643   beq(done);
1644 }
1645 
1646 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1647                                            Register centry) {
1648   const Runtime::Function* f = Runtime::FunctionForId(fid);
1649   // TODO(1236192): Most runtime routines don't need the number of
1650   // arguments passed in because it is constant. At some point we
1651   // should remove this need and make the runtime routine entry code
1652   // smarter.
1653   mov(r3, Operand(f->nargs));
1654   Move(r4, ExternalReference::Create(f));
1655   DCHECK(!AreAliased(centry, r3, r4));
1656   addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
1657   Call(centry);
1658 }
1659 
1660 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1661                                  SaveFPRegsMode save_doubles) {
1662   // All parameters are on the stack.  r3 has the return value after call.
1663 
1664   // If the expected number of arguments of the runtime function is
1665   // constant, we check that the actual number of arguments match the
1666   // expectation.
1667   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1668 
1669   // TODO(1236192): Most runtime routines don't need the number of
1670   // arguments passed in because it is constant. At some point we
1671   // should remove this need and make the runtime routine entry code
1672   // smarter.
1673   mov(r3, Operand(num_arguments));
1674   Move(r4, ExternalReference::Create(f));
1675 #if V8_TARGET_ARCH_PPC64
1676   Handle<Code> code =
1677       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1678 #else
1679   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1680 #endif
1681   Call(code, RelocInfo::CODE_TARGET);
1682 }
1683 
1684 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1685   const Runtime::Function* function = Runtime::FunctionForId(fid);
1686   DCHECK_EQ(1, function->result_size);
1687   if (function->nargs >= 0) {
1688     mov(r3, Operand(function->nargs));
1689   }
1690   JumpToExternalReference(ExternalReference::Create(fid));
1691 }
1692 
1693 
1694 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1695                                              bool builtin_exit_frame) {
1696   Move(r4, builtin);
1697   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1698                                           kArgvOnStack, builtin_exit_frame);
1699   Jump(code, RelocInfo::CODE_TARGET);
1700 }
1701 
1702 void MacroAssembler::JumpToInstructionStream(Address entry) {
1703   mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1704   Jump(kOffHeapTrampolineRegister);
1705 }
1706 
1707 void MacroAssembler::LoadWeakValue(Register out, Register in,
1708                                    Label* target_if_cleared) {
1709   cmpi(in, Operand(kClearedWeakHeapObject));
1710   beq(target_if_cleared);
1711 
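  // Otherwise strip the weak-reference tag bits to recover the plain pointer.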
1712   mov(r0, Operand(~kWeakHeapObjectMask));
1713   and_(out, in, r0);
1714 }
1715 
1716 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1717                                       Register scratch1, Register scratch2) {
1718   DCHECK_GT(value, 0);
1719   if (FLAG_native_code_counters && counter->Enabled()) {
1720     Move(scratch2, ExternalReference::Create(counter));
1721     lwz(scratch1, MemOperand(scratch2));
1722     addi(scratch1, scratch1, Operand(value));
1723     stw(scratch1, MemOperand(scratch2));
1724   }
1725 }
1726 
1727 
1728 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1729                                       Register scratch1, Register scratch2) {
1730   DCHECK_GT(value, 0);
1731   if (FLAG_native_code_counters && counter->Enabled()) {
1732     Move(scratch2, ExternalReference::Create(counter));
1733     lwz(scratch1, MemOperand(scratch2));
1734     subi(scratch1, scratch1, Operand(value));
1735     stw(scratch1, MemOperand(scratch2));
1736   }
1737 }
1738 
1739 void TurboAssembler::Assert(Condition cond, AbortReason reason,
1740                             CRegister cr) {
1741   if (emit_debug_code()) Check(cond, reason, cr);
1742 }
1743 
1744 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1745   Label L;
1746   b(cond, &L, cr);
1747   Abort(reason);
1748   // will not return here
1749   bind(&L);
1750 }
1751 
1752 void TurboAssembler::Abort(AbortReason reason) {
1753   Label abort_start;
1754   bind(&abort_start);
1755   const char* msg = GetAbortReason(reason);
1756 #ifdef DEBUG
1757   RecordComment("Abort message: ");
1758   RecordComment(msg);
1759 #endif
1760 
1761   // Avoid emitting call to builtin if requested.
1762   if (trap_on_abort()) {
1763     stop(msg);
1764     return;
1765   }
1766 
1767   if (should_abort_hard()) {
1768     // We don't care if we constructed a frame. Just pretend we did.
1769     FrameScope assume_frame(this, StackFrame::NONE);
1770     mov(r3, Operand(static_cast<int>(reason)));
1771     PrepareCallCFunction(1, r4);
1772     CallCFunction(ExternalReference::abort_with_reason(), 1);
1773     return;
1774   }
1775 
1776   LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
1777 
1778   // Disable stub call restrictions to always allow calls to abort.
1779   if (!has_frame_) {
1780     // We don't actually want to generate a pile of code for this, so just
1781     // claim there is a stack frame, without generating one.
1782     FrameScope scope(this, StackFrame::NONE);
1783     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1784   } else {
1785     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1786   }
1787   // will not return here
1788 }
1789 
1790 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1791   LoadP(dst, NativeContextMemOperand());
1792   LoadP(dst, ContextMemOperand(dst, index));
1793 }
1794 
1795 
1796 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
1797                                        Label* smi_case) {
1798   STATIC_ASSERT(kSmiTag == 0);
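  // Test the Smi tag bits (setting cr0), untag unconditionally, then branch
  // only if the tag bits were zero, i.e. the value really was a Smi.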
1799   TestBitRange(src, kSmiTagSize - 1, 0, r0);
1800   SmiUntag(dst, src);
1801   beq(smi_case, cr0);
1802 }
1803 
1804 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
1805                                      Label* on_either_smi) {
1806   STATIC_ASSERT(kSmiTag == 0);
1807   JumpIfSmi(reg1, on_either_smi);
1808   JumpIfSmi(reg2, on_either_smi);
1809 }
1810 
1811 void MacroAssembler::AssertNotSmi(Register object) {
1812   if (emit_debug_code()) {
1813     STATIC_ASSERT(kSmiTag == 0);
1814     TestIfSmi(object, r0);
1815     Check(ne, AbortReason::kOperandIsASmi, cr0);
1816   }
1817 }
1818 
1819 
1820 void MacroAssembler::AssertSmi(Register object) {
1821   if (emit_debug_code()) {
1822     STATIC_ASSERT(kSmiTag == 0);
1823     TestIfSmi(object, r0);
1824     Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1825   }
1826 }
1827 
1828 void MacroAssembler::AssertConstructor(Register object) {
1829   if (emit_debug_code()) {
1830     STATIC_ASSERT(kSmiTag == 0);
1831     TestIfSmi(object, r0);
1832     Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
1833     push(object);
1834     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
1835     lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
1836     andi(object, object, Operand(Map::IsConstructorBit::kMask));
1837     pop(object);
1838     Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
1839   }
1840 }
1841 
1842 void MacroAssembler::AssertFunction(Register object) {
1843   if (emit_debug_code()) {
1844     STATIC_ASSERT(kSmiTag == 0);
1845     TestIfSmi(object, r0);
1846     Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1847     push(object);
1848     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1849     pop(object);
1850     Check(eq, AbortReason::kOperandIsNotAFunction);
1851   }
1852 }
1853 
1854 
1855 void MacroAssembler::AssertBoundFunction(Register object) {
1856   if (emit_debug_code()) {
1857     STATIC_ASSERT(kSmiTag == 0);
1858     TestIfSmi(object, r0);
1859     Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1860     push(object);
1861     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1862     pop(object);
1863     Check(eq, AbortReason::kOperandIsNotABoundFunction);
1864   }
1865 }
1866 
1867 void MacroAssembler::AssertGeneratorObject(Register object) {
1868   if (!emit_debug_code()) return;
1869   TestIfSmi(object, r0);
1870   Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1871 
1872   // Load map
1873   Register map = object;
1874   push(object);
1875   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1876 
1877   // Check if JSGeneratorObject
1878   Label do_check;
1879   Register instance_type = object;
1880   CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1881   beq(&do_check);
1882 
1883   // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1884   cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1885 
1886   bind(&do_check);
1887   // Restore generator object to register and perform assertion
1888   pop(object);
1889   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1890 }
1891 
1892 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1893                                                      Register scratch) {
1894   if (emit_debug_code()) {
1895     Label done_checking;
1896     AssertNotSmi(object);
1897     CompareRoot(object, Heap::kUndefinedValueRootIndex);
1898     beq(&done_checking);
1899     LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1900     CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1901     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1902     bind(&done_checking);
1903   }
1904 }
1905 
1906 
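// Number of simple (integer/pointer) arguments passed in registers r3..r10 by
// the PPC C calling convention; additional arguments are passed on the stack.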
1907 static const int kRegisterPassedArguments = 8;
1908 
1909 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1910                                               int num_double_arguments) {
1911   int stack_passed_words = 0;
1912   if (num_double_arguments > DoubleRegister::kNumRegisters) {
1913     stack_passed_words +=
1914         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1915   }
1916   // Up to 8 simple arguments are passed in registers r3..r10.
1917   if (num_reg_arguments > kRegisterPassedArguments) {
1918     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1919   }
1920   return stack_passed_words;
1921 }
1922 
1923 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1924                                           int num_double_arguments,
1925                                           Register scratch) {
1926   int frame_alignment = ActivationFrameAlignment();
1927   int stack_passed_arguments =
1928       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1929   int stack_space = kNumRequiredStackFrameSlots;
1930 
1931   if (frame_alignment > kPointerSize) {
1932     // Make stack end at alignment and make room for stack arguments
1933     // -- preserving original value of sp.
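    // The saved sp is stored just above the outgoing stack arguments and is
    // restored again by CallCFunctionHelper after the call returns.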
1934     mr(scratch, sp);
1935     addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
1936     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1937     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1938     StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
1939   } else {
1940     // Make room for stack arguments
1941     stack_space += stack_passed_arguments;
1942   }
1943 
1944   // Allocate frame with required slots to make ABI work.
1945   li(r0, Operand::Zero());
1946   StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
1947 }
1948 
1949 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1950                                           Register scratch) {
1951   PrepareCallCFunction(num_reg_arguments, 0, scratch);
1952 }
1953 
1954 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
1955 
1956 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
1957 
1958 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
1959                                           DoubleRegister src2) {
1960   if (src2 == d1) {
1961     DCHECK(src1 != d2);
1962     Move(d2, src2);
1963     Move(d1, src1);
1964   } else {
1965     Move(d1, src1);
1966     Move(d2, src2);
1967   }
1968 }
1969 
1970 void TurboAssembler::CallCFunction(ExternalReference function,
1971                                    int num_reg_arguments,
1972                                    int num_double_arguments) {
1973   Move(ip, function);
1974   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
1975 }
1976 
1977 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
1978                                    int num_double_arguments) {
1979   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
1980 }
1981 
1982 void TurboAssembler::CallCFunction(ExternalReference function,
1983                                    int num_arguments) {
1984   CallCFunction(function, num_arguments, 0);
1985 }
1986 
1987 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
1988   CallCFunction(function, num_arguments, 0);
1989 }
1990 
1991 void TurboAssembler::CallCFunctionHelper(Register function,
1992                                          int num_reg_arguments,
1993                                          int num_double_arguments) {
1994   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
1995   DCHECK(has_frame());
1996 
1997   // Just call directly. The function called cannot cause a GC, or
1998   // allow preemption, so the return address in the link register
1999   // stays correct.
2000   Register dest = function;
2001   if (ABI_USES_FUNCTION_DESCRIPTORS) {
2002     // AIX/PPC64BE Linux uses a function descriptor. When calling C code, pick
2003     // up the entry point and TOC pointer from the descriptor.
2004     LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
2005     LoadP(ip, MemOperand(function, 0));
2006     dest = ip;
2007   } else if (ABI_CALL_VIA_IP) {
2008     Move(ip, function);
2009     dest = ip;
2010   }
2011 
2012   Call(dest);
2013 
2014   // Remove the frame created by PrepareCallCFunction.
2015   int stack_passed_arguments =
2016       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2017   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2018   if (ActivationFrameAlignment() > kPointerSize) {
2019     LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
2020   } else {
2021     addi(sp, sp, Operand(stack_space * kPointerSize));
2022   }
2023 }
2024 
2025 
2026 void TurboAssembler::CheckPageFlag(
2027     Register object,
2028     Register scratch,  // scratch may be same register as object
2029     int mask, Condition cc, Label* condition_met) {
2030   DCHECK(cc == ne || cc == eq);
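  // Clearing the low kPageSizeBits bits of the object address yields the start
  // of its MemoryChunk, where the flags word is stored.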
2031   ClearRightImm(scratch, object, Operand(kPageSizeBits));
2032   LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2033 
2034   mov(r0, Operand(mask));
2035   and_(r0, scratch, r0, SetRC);
2036 
2037   if (cc == ne) {
2038     bne(condition_met, cr0);
2039   }
2040   if (cc == eq) {
2041     beq(condition_met, cr0);
2042   }
2043 }
2044 
2045 void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
2046 
2047 void TurboAssembler::ResetRoundingMode() {
2048   mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
2049 }
2050 
2051 
2052 ////////////////////////////////////////////////////////////////////////////////
2053 //
2054 // New MacroAssembler Interfaces added for PPC
2055 //
2056 ////////////////////////////////////////////////////////////////////////////////
2057 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
2058   mov(dst, Operand(value));
2059 }
2060 
2061 void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
2062   mov(dst, Operand(smi));
2063 }
2064 
2065 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
2066                                        Register scratch) {
2067   if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
2068       !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2069     ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
2070     if (access == ConstantPoolEntry::OVERFLOWED) {
2071       addis(scratch, kConstantPoolRegister, Operand::Zero());
2072       lfd(result, MemOperand(scratch, 0));
2073     } else {
2074       lfd(result, MemOperand(kConstantPoolRegister, 0));
2075     }
2076     return;
2077   }
2078 
2079   // Avoid a gcc strict-aliasing error by type-punning through a union.
2080   union {
2081     uint64_t dval;
2082 #if V8_TARGET_ARCH_PPC64
2083     intptr_t ival;
2084 #else
2085     intptr_t ival[2];
2086 #endif
2087   } litVal;
2088 
2089   litVal.dval = value.AsUint64();
2090 
2091 #if V8_TARGET_ARCH_PPC64
2092   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2093     mov(scratch, Operand(litVal.ival));
2094     mtfprd(result, scratch);
2095     return;
2096   }
2097 #endif
2098 
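  // No direct GPR->FPR move is available: bounce the 64-bit pattern through a
  // temporary stack slot instead.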
2099   addi(sp, sp, Operand(-kDoubleSize));
2100 #if V8_TARGET_ARCH_PPC64
2101   mov(scratch, Operand(litVal.ival));
2102   std(scratch, MemOperand(sp));
2103 #else
2104   LoadIntLiteral(scratch, litVal.ival[0]);
2105   stw(scratch, MemOperand(sp, 0));
2106   LoadIntLiteral(scratch, litVal.ival[1]);
2107   stw(scratch, MemOperand(sp, 4));
2108 #endif
2109   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
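  // (The group-ending nop separates the store above from the load below,
  // avoiding a load-hit-store stall on the just-written stack slot.)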
2110   lfd(result, MemOperand(sp, 0));
2111   addi(sp, sp, Operand(kDoubleSize));
2112 }
2113 
2114 void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2115                                     Register scratch) {
2116 // sign-extend src to 64-bit
2117 #if V8_TARGET_ARCH_PPC64
2118   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2119     mtfprwa(dst, src);
2120     return;
2121   }
2122 #endif
2123 
2124   DCHECK(src != scratch);
2125   subi(sp, sp, Operand(kDoubleSize));
2126 #if V8_TARGET_ARCH_PPC64
2127   extsw(scratch, src);
2128   std(scratch, MemOperand(sp, 0));
2129 #else
2130   srawi(scratch, src, 31);
2131   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2132   stw(src, MemOperand(sp, Register::kMantissaOffset));
2133 #endif
2134   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2135   lfd(dst, MemOperand(sp, 0));
2136   addi(sp, sp, Operand(kDoubleSize));
2137 }
2138 
2139 void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2140                                             Register scratch) {
2141 // zero-extend src to 64-bit
2142 #if V8_TARGET_ARCH_PPC64
2143   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2144     mtfprwz(dst, src);
2145     return;
2146   }
2147 #endif
2148 
2149   DCHECK(src != scratch);
2150   subi(sp, sp, Operand(kDoubleSize));
2151 #if V8_TARGET_ARCH_PPC64
2152   clrldi(scratch, src, Operand(32));
2153   std(scratch, MemOperand(sp, 0));
2154 #else
2155   li(scratch, Operand::Zero());
2156   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2157   stw(src, MemOperand(sp, Register::kMantissaOffset));
2158 #endif
2159   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2160   lfd(dst, MemOperand(sp, 0));
2161   addi(sp, sp, Operand(kDoubleSize));
2162 }
2163 
2164 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
2165 #if !V8_TARGET_ARCH_PPC64
2166                                       Register src_hi,
2167 #endif
2168                                       Register src) {
2169 #if V8_TARGET_ARCH_PPC64
2170   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2171     mtfprd(dst, src);
2172     return;
2173   }
2174 #endif
2175 
2176   subi(sp, sp, Operand(kDoubleSize));
2177 #if V8_TARGET_ARCH_PPC64
2178   std(src, MemOperand(sp, 0));
2179 #else
2180   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2181   stw(src, MemOperand(sp, Register::kMantissaOffset));
2182 #endif
2183   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2184   lfd(dst, MemOperand(sp, 0));
2185   addi(sp, sp, Operand(kDoubleSize));
2186 }
2187 
2188 
2189 #if V8_TARGET_ARCH_PPC64
2190 void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2191                                                 Register src_hi,
2192                                                 Register src_lo,
2193                                                 Register scratch) {
2194   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
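    // Pack src_hi into the upper 32 bits and src_lo into the lower 32 bits of
    // scratch, then move the combined pattern directly into the FP register.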
2195     sldi(scratch, src_hi, Operand(32));
2196     rldimi(scratch, src_lo, 0, 32);
2197     mtfprd(dst, scratch);
2198     return;
2199   }
2200 
2201   subi(sp, sp, Operand(kDoubleSize));
2202   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2203   stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
2204   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2205   lfd(dst, MemOperand(sp));
2206   addi(sp, sp, Operand(kDoubleSize));
2207 }
2208 #endif
2209 
2210 void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2211                                      Register scratch) {
2212 #if V8_TARGET_ARCH_PPC64
2213   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2214     mffprd(scratch, dst);
2215     rldimi(scratch, src, 0, 32);
2216     mtfprd(dst, scratch);
2217     return;
2218   }
2219 #endif
2220 
2221   subi(sp, sp, Operand(kDoubleSize));
2222   stfd(dst, MemOperand(sp));
2223   stw(src, MemOperand(sp, Register::kMantissaOffset));
2224   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2225   lfd(dst, MemOperand(sp));
2226   addi(sp, sp, Operand(kDoubleSize));
2227 }
2228 
2229 void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2230                                       Register scratch) {
2231 #if V8_TARGET_ARCH_PPC64
2232   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2233     mffprd(scratch, dst);
2234     rldimi(scratch, src, 32, 0);
2235     mtfprd(dst, scratch);
2236     return;
2237   }
2238 #endif
2239 
2240   subi(sp, sp, Operand(kDoubleSize));
2241   stfd(dst, MemOperand(sp));
2242   stw(src, MemOperand(sp, Register::kExponentOffset));
2243   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2244   lfd(dst, MemOperand(sp));
2245   addi(sp, sp, Operand(kDoubleSize));
2246 }
2247 
2248 void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2249 #if V8_TARGET_ARCH_PPC64
2250   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2251     mffprwz(dst, src);
2252     return;
2253   }
2254 #endif
2255 
2256   subi(sp, sp, Operand(kDoubleSize));
2257   stfd(src, MemOperand(sp));
2258   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2259   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2260   addi(sp, sp, Operand(kDoubleSize));
2261 }
2262 
2263 void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2264 #if V8_TARGET_ARCH_PPC64
2265   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2266     mffprd(dst, src);
2267     srdi(dst, dst, Operand(32));
2268     return;
2269   }
2270 #endif
2271 
2272   subi(sp, sp, Operand(kDoubleSize));
2273   stfd(src, MemOperand(sp));
2274   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2275   lwz(dst, MemOperand(sp, Register::kExponentOffset));
2276   addi(sp, sp, Operand(kDoubleSize));
2277 }
2278 
2279 void TurboAssembler::MovDoubleToInt64(
2280 #if !V8_TARGET_ARCH_PPC64
2281     Register dst_hi,
2282 #endif
2283     Register dst, DoubleRegister src) {
2284 #if V8_TARGET_ARCH_PPC64
2285   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2286     mffprd(dst, src);
2287     return;
2288   }
2289 #endif
2290 
2291   subi(sp, sp, Operand(kDoubleSize));
2292   stfd(src, MemOperand(sp));
2293   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2294 #if V8_TARGET_ARCH_PPC64
2295   ld(dst, MemOperand(sp, 0));
2296 #else
2297   lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
2298   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2299 #endif
2300   addi(sp, sp, Operand(kDoubleSize));
2301 }
2302 
2303 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2304   subi(sp, sp, Operand(kFloatSize));
2305   stw(src, MemOperand(sp, 0));
2306   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2307   lfs(dst, MemOperand(sp, 0));
2308   addi(sp, sp, Operand(kFloatSize));
2309 }
2310 
2311 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2312   subi(sp, sp, Operand(kFloatSize));
2313   stfs(src, MemOperand(sp, 0));
2314   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2315   lwz(dst, MemOperand(sp, 0));
2316   addi(sp, sp, Operand(kFloatSize));
2317 }
2318 
2319 void TurboAssembler::Add(Register dst, Register src, intptr_t value,
2320                          Register scratch) {
2321   if (is_int16(value)) {
2322     addi(dst, src, Operand(value));
2323   } else {
2324     mov(scratch, Operand(value));
2325     add(dst, src, scratch);
2326   }
2327 }
2328 
2329 
2330 void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
2331                           CRegister cr) {
2332   intptr_t value = src2.immediate();
2333   if (is_int16(value)) {
2334     cmpi(src1, src2, cr);
2335   } else {
2336     mov(scratch, src2);
2337     cmp(src1, scratch, cr);
2338   }
2339 }
2340 
2341 void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
2342                            CRegister cr) {
2343   intptr_t value = src2.immediate();
2344   if (is_uint16(value)) {
2345     cmpli(src1, src2, cr);
2346   } else {
2347     mov(scratch, src2);
2348     cmpl(src1, scratch, cr);
2349   }
2350 }
2351 
2352 void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
2353                            CRegister cr) {
2354   intptr_t value = src2.immediate();
2355   if (is_int16(value)) {
2356     cmpwi(src1, src2, cr);
2357   } else {
2358     mov(scratch, src2);
2359     cmpw(src1, scratch, cr);
2360   }
2361 }
2362 
2363 
2364 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
2365                             Register scratch, CRegister cr) {
2366   intptr_t value = src2.immediate();
2367   if (is_uint16(value)) {
2368     cmplwi(src1, src2, cr);
2369   } else {
2370     mov(scratch, src2);
2371     cmplw(src1, scratch, cr);
2372   }
2373 }
2374 
2375 
2376 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
2377                          RCBit rc) {
2378   if (rb.is_reg()) {
2379     and_(ra, rs, rb.rm(), rc);
2380   } else {
2381     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2382         rc == SetRC) {
2383       andi(ra, rs, rb);
2384     } else {
2385       // mov handles the relocation.
2386       DCHECK(rs != r0);
2387       mov(r0, rb);
2388       and_(ra, rs, r0, rc);
2389     }
2390   }
2391 }
2392 
2393 
2394 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
2395   if (rb.is_reg()) {
2396     orx(ra, rs, rb.rm(), rc);
2397   } else {
2398     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2399         rc == LeaveRC) {
2400       ori(ra, rs, rb);
2401     } else {
2402       // mov handles the relocation.
2403       DCHECK(rs != r0);
2404       mov(r0, rb);
2405       orx(ra, rs, r0, rc);
2406     }
2407   }
2408 }
2409 
2410 
2411 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
2412                          RCBit rc) {
2413   if (rb.is_reg()) {
2414     xor_(ra, rs, rb.rm(), rc);
2415   } else {
2416     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2417         rc == LeaveRC) {
2418       xori(ra, rs, rb);
2419     } else {
2420       // mov handles the relocation.
2421       DCHECK(rs != r0);
2422       mov(r0, rb);
2423       xor_(ra, rs, r0, rc);
2424     }
2425   }
2426 }
2427 
2428 
2429 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
2430                                    CRegister cr) {
2431 #if V8_TARGET_ARCH_PPC64
2432   LoadSmiLiteral(scratch, smi);
2433   cmp(src1, scratch, cr);
2434 #else
2435   Cmpi(src1, Operand(smi), scratch, cr);
2436 #endif
2437 }
2438 
2439 
2440 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
2441                                     CRegister cr) {
2442 #if V8_TARGET_ARCH_PPC64
2443   LoadSmiLiteral(scratch, smi);
2444   cmpl(src1, scratch, cr);
2445 #else
2446   Cmpli(src1, Operand(smi), scratch, cr);
2447 #endif
2448 }
2449 
2450 
2451 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
2452                                    Register scratch) {
2453 #if V8_TARGET_ARCH_PPC64
2454   LoadSmiLiteral(scratch, smi);
2455   add(dst, src, scratch);
2456 #else
2457   Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
2458 #endif
2459 }
2460 
2461 
2462 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
2463                                    Register scratch) {
2464 #if V8_TARGET_ARCH_PPC64
2465   LoadSmiLiteral(scratch, smi);
2466   sub(dst, src, scratch);
2467 #else
2468   Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
2469 #endif
2470 }
2471 
2472 
2473 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
2474                                    Register scratch, RCBit rc) {
2475 #if V8_TARGET_ARCH_PPC64
2476   LoadSmiLiteral(scratch, smi);
2477   and_(dst, src, scratch, rc);
2478 #else
2479   And(dst, src, Operand(smi), rc);
2480 #endif
2481 }
2482 
2483 
2484 // Load a "pointer" sized value from the memory location
2485 void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
2486                            Register scratch) {
2487   DCHECK_EQ(mem.rb(), no_reg);
2488   int offset = mem.offset();
2489 
2490   if (!is_int16(offset)) {
2491     /* cannot use d-form */
2492     DCHECK_NE(scratch, no_reg);
2493     mov(scratch, Operand(offset));
2494     LoadPX(dst, MemOperand(mem.ra(), scratch));
2495   } else {
2496 #if V8_TARGET_ARCH_PPC64
2497     int misaligned = (offset & 3);
2498     if (misaligned) {
2499       // Adjust the base: the 64-bit ld takes a DS-form displacement that must
2500       // be a multiple of 4.  TODO: enhance to use scratch if dst is unsuitable.
2501       DCHECK(dst != r0);
2502       addi(dst, mem.ra(), Operand((offset & 3) - 4));
2503       ld(dst, MemOperand(dst, (offset & ~3) + 4));
2504     } else {
2505       ld(dst, mem);
2506     }
2507 #else
2508     lwz(dst, mem);
2509 #endif
2510   }
2511 }
2512 
2513 void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
2514                             Register scratch) {
2515   int offset = mem.offset();
2516 
2517   if (!is_int16(offset)) {
2518     /* cannot use d-form */
2519     DCHECK(scratch != no_reg);
2520     mov(scratch, Operand(offset));
2521     LoadPUX(dst, MemOperand(mem.ra(), scratch));
2522   } else {
2523 #if V8_TARGET_ARCH_PPC64
2524     ldu(dst, mem);
2525 #else
2526     lwzu(dst, mem);
2527 #endif
2528   }
2529 }
2530 
2531 // Store a "pointer" sized value to the memory location
2532 void TurboAssembler::StoreP(Register src, const MemOperand& mem,
2533                             Register scratch) {
2534   int offset = mem.offset();
2535 
2536   if (!is_int16(offset)) {
2537     /* cannot use d-form */
2538     DCHECK(scratch != no_reg);
2539     mov(scratch, Operand(offset));
2540     StorePX(src, MemOperand(mem.ra(), scratch));
2541   } else {
2542 #if V8_TARGET_ARCH_PPC64
2543     int misaligned = (offset & 3);
2544     if (misaligned) {
2545       // Adjust the base: std also takes a DS-form displacement that must be a
2546       // multiple of 4; a suitable scratch register is required here.
2547       DCHECK(scratch != no_reg);
2548       if (scratch == r0) {
2549         LoadIntLiteral(scratch, offset);
2550         stdx(src, MemOperand(mem.ra(), scratch));
2551       } else {
2552         addi(scratch, mem.ra(), Operand((offset & 3) - 4));
2553         std(src, MemOperand(scratch, (offset & ~3) + 4));
2554       }
2555     } else {
2556       std(src, mem);
2557     }
2558 #else
2559     stw(src, mem);
2560 #endif
2561   }
2562 }
2563 
2564 void TurboAssembler::StorePU(Register src, const MemOperand& mem,
2565                              Register scratch) {
2566   int offset = mem.offset();
2567 
2568   if (!is_int16(offset)) {
2569     /* cannot use d-form */
2570     DCHECK(scratch != no_reg);
2571     mov(scratch, Operand(offset));
2572     StorePUX(src, MemOperand(mem.ra(), scratch));
2573   } else {
2574 #if V8_TARGET_ARCH_PPC64
2575     stdu(src, mem);
2576 #else
2577     stwu(src, mem);
2578 #endif
2579   }
2580 }
2581 
2582 void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
2583                                    Register scratch) {
2584   int offset = mem.offset();
2585 
2586   if (!is_int16(offset)) {
2587     DCHECK(scratch != no_reg);
2588     mov(scratch, Operand(offset));
2589     lwax(dst, MemOperand(mem.ra(), scratch));
2590   } else {
2591 #if V8_TARGET_ARCH_PPC64
2592     int misaligned = (offset & 3);
2593     if (misaligned) {
2594       // Adjust the base: lwa takes a DS-form displacement that must be a
2595       // multiple of 4.  TODO: enhance to use scratch if dst is unsuitable.
2596       DCHECK(dst != r0);
2597       addi(dst, mem.ra(), Operand((offset & 3) - 4));
2598       lwa(dst, MemOperand(dst, (offset & ~3) + 4));
2599     } else {
2600       lwa(dst, mem);
2601     }
2602 #else
2603     lwz(dst, mem);
2604 #endif
2605   }
2606 }
2607 
2608 
2609 // Variable length depending on whether offset fits into immediate field
2610 // MemOperand currently only supports d-form
2611 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
2612                               Register scratch) {
2613   Register base = mem.ra();
2614   int offset = mem.offset();
2615 
2616   if (!is_int16(offset)) {
2617     LoadIntLiteral(scratch, offset);
2618     lwzx(dst, MemOperand(base, scratch));
2619   } else {
2620     lwz(dst, mem);
2621   }
2622 }
2623 
2624 
2625 // Variable length depending on whether offset fits into immediate field
2626 // MemOperand currently only supports d-form
2627 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
2628                                Register scratch) {
2629   Register base = mem.ra();
2630   int offset = mem.offset();
2631 
2632   if (!is_int16(offset)) {
2633     LoadIntLiteral(scratch, offset);
2634     stwx(src, MemOperand(base, scratch));
2635   } else {
2636     stw(src, mem);
2637   }
2638 }
2639 
2640 
2641 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
2642                                        Register scratch) {
2643   int offset = mem.offset();
2644 
2645   if (!is_int16(offset)) {
2646     DCHECK(scratch != no_reg);
2647     mov(scratch, Operand(offset));
2648     lhax(dst, MemOperand(mem.ra(), scratch));
2649   } else {
2650     lha(dst, mem);
2651   }
2652 }
2653 
2654 
2655 // Variable length depending on whether offset fits into immediate field
2656 // MemOperand currently only supports d-form
2657 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
2658                                   Register scratch) {
2659   Register base = mem.ra();
2660   int offset = mem.offset();
2661 
2662   if (!is_int16(offset)) {
2663     DCHECK_NE(scratch, no_reg);
2664     LoadIntLiteral(scratch, offset);
2665     lhzx(dst, MemOperand(base, scratch));
2666   } else {
2667     lhz(dst, mem);
2668   }
2669 }
2670 
2671 
2672 // Variable length depending on whether offset fits into immediate field
2673 // MemOperand currently only supports d-form
2674 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
2675                                    Register scratch) {
2676   Register base = mem.ra();
2677   int offset = mem.offset();
2678 
2679   if (!is_int16(offset)) {
2680     LoadIntLiteral(scratch, offset);
2681     sthx(src, MemOperand(base, scratch));
2682   } else {
2683     sth(src, mem);
2684   }
2685 }
2686 
2687 
2688 // Variable length depending on whether offset fits into immediate field
2689 // MemOperand currently only supports d-form
2690 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
2691                               Register scratch) {
2692   Register base = mem.ra();
2693   int offset = mem.offset();
2694 
2695   if (!is_int16(offset)) {
2696     LoadIntLiteral(scratch, offset);
2697     lbzx(dst, MemOperand(base, scratch));
2698   } else {
2699     lbz(dst, mem);
2700   }
2701 }
2702 
2703 
2704 // Variable length depending on whether offset fits into immediate field
2705 // MemOperand currently only supports d-form
2706 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
2707                                Register scratch) {
2708   Register base = mem.ra();
2709   int offset = mem.offset();
2710 
2711   if (!is_int16(offset)) {
2712     LoadIntLiteral(scratch, offset);
2713     stbx(src, MemOperand(base, scratch));
2714   } else {
2715     stb(src, mem);
2716   }
2717 }
2718 
2719 
2720 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
2721                                         Representation r, Register scratch) {
2722   DCHECK(!r.IsDouble());
2723   if (r.IsInteger8()) {
2724     LoadByte(dst, mem, scratch);
2725     extsb(dst, dst);
2726   } else if (r.IsUInteger8()) {
2727     LoadByte(dst, mem, scratch);
2728   } else if (r.IsInteger16()) {
2729     LoadHalfWordArith(dst, mem, scratch);
2730   } else if (r.IsUInteger16()) {
2731     LoadHalfWord(dst, mem, scratch);
2732 #if V8_TARGET_ARCH_PPC64
2733   } else if (r.IsInteger32()) {
2734     LoadWordArith(dst, mem, scratch);
2735 #endif
2736   } else {
2737     LoadP(dst, mem, scratch);
2738   }
2739 }
2740 
2741 
2742 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
2743                                          Representation r, Register scratch) {
2744   DCHECK(!r.IsDouble());
2745   if (r.IsInteger8() || r.IsUInteger8()) {
2746     StoreByte(src, mem, scratch);
2747   } else if (r.IsInteger16() || r.IsUInteger16()) {
2748     StoreHalfWord(src, mem, scratch);
2749 #if V8_TARGET_ARCH_PPC64
2750   } else if (r.IsInteger32()) {
2751     StoreWord(src, mem, scratch);
2752 #endif
2753   } else {
2754     if (r.IsHeapObject()) {
2755       AssertNotSmi(src);
2756     } else if (r.IsSmi()) {
2757       AssertSmi(src);
2758     }
2759     StoreP(src, mem, scratch);
2760   }
2761 }
2762 
2763 void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
2764                                 Register scratch) {
2765   Register base = mem.ra();
2766   int offset = mem.offset();
2767 
2768   if (!is_int16(offset)) {
2769     mov(scratch, Operand(offset));
2770     lfdx(dst, MemOperand(base, scratch));
2771   } else {
2772     lfd(dst, mem);
2773   }
2774 }
2775 
2776 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
2777                                 Register scratch) {
2778   Register base = mem.ra();
2779   int offset = mem.offset();
2780 
2781   if (!is_int16(offset)) {
2782     mov(scratch, Operand(offset));
2783     lfdux(dst, MemOperand(base, scratch));
2784   } else {
2785     lfdu(dst, mem);
2786   }
2787 }
2788 
2789 void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
2790                                 Register scratch) {
2791   Register base = mem.ra();
2792   int offset = mem.offset();
2793 
2794   if (!is_int16(offset)) {
2795     mov(scratch, Operand(offset));
2796     lfsx(dst, MemOperand(base, scratch));
2797   } else {
2798     lfs(dst, mem);
2799   }
2800 }
2801 
2802 void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
2803                                  Register scratch) {
2804   Register base = mem.ra();
2805   int offset = mem.offset();
2806 
2807   if (!is_int16(offset)) {
2808     mov(scratch, Operand(offset));
2809     lfsux(dst, MemOperand(base, scratch));
2810   } else {
2811     lfsu(dst, mem);
2812   }
2813 }
2814 
2815 void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
2816                                  Register scratch) {
2817   Register base = mem.ra();
2818   int offset = mem.offset();
2819 
2820   if (!is_int16(offset)) {
2821     mov(scratch, Operand(offset));
2822     stfdx(src, MemOperand(base, scratch));
2823   } else {
2824     stfd(src, mem);
2825   }
2826 }
2827 
2828 void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
2829                                   Register scratch) {
2830   Register base = mem.ra();
2831   int offset = mem.offset();
2832 
2833   if (!is_int16(offset)) {
2834     mov(scratch, Operand(offset));
2835     stfdux(src, MemOperand(base, scratch));
2836   } else {
2837     stfdu(src, mem);
2838   }
2839 }
2840 
2841 void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
2842                                  Register scratch) {
2843   Register base = mem.ra();
2844   int offset = mem.offset();
2845 
2846   if (!is_int16(offset)) {
2847     mov(scratch, Operand(offset));
2848     stfsx(src, MemOperand(base, scratch));
2849   } else {
2850     stfs(src, mem);
2851   }
2852 }
2853 
2854 void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
2855                                   Register scratch) {
2856   Register base = mem.ra();
2857   int offset = mem.offset();
2858 
2859   if (!is_int16(offset)) {
2860     mov(scratch, Operand(offset));
2861     stfsux(src, MemOperand(base, scratch));
2862   } else {
2863     stfsu(src, mem);
2864   }
2865 }
2866 
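// Usage sketch (illustrative only; assumes a frame slot whose offset does not
// fit in a signed 16-bit immediate, with ip serving as the scratch register):
//
//   StoreDouble(d1, MemOperand(fp, large_offset), ip);
//   LoadDouble(d1, MemOperand(fp, large_offset), ip);
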
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}

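// Usage sketch (illustrative only; |object|, |address| and |scratch| stand for
// registers that must stay live and therefore cannot be handed out):
//
//   Register temp = GetRegisterThatIsNotOneOf(object, address, scratch);
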
void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  mr(scratch, src);
  mr(src, dst);
  mr(dst, scratch);
}

void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
  if (dst.ra() != r0 && dst.ra().is_valid())
    DCHECK(!AreAliased(src, dst.ra(), scratch));
  if (dst.rb() != r0 && dst.rb().is_valid())
    DCHECK(!AreAliased(src, dst.rb(), scratch));
  DCHECK(!AreAliased(src, scratch));
  mr(scratch, src);
  LoadP(src, dst, r0);
  StoreP(scratch, dst, r0);
}

void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
                           Register scratch_1) {
  if (src.ra() != r0 && src.ra().is_valid())
    DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
  if (src.rb() != r0 && src.rb().is_valid())
    DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
  if (dst.ra() != r0 && dst.ra().is_valid())
    DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
  if (dst.rb() != r0 && dst.rb().is_valid())
    DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
  DCHECK(!AreAliased(scratch_0, scratch_1));
  if (is_int16(src.offset()) || is_int16(dst.offset())) {
    if (!is_int16(src.offset())) {
      // Swap the operands so that src is the one whose offset fits in 16 bits
      // and can be accessed below without a scratch register.
      MemOperand temp = src;
      src = dst;
      dst = temp;
    }
    LoadP(scratch_1, dst, scratch_0);
    LoadP(scratch_0, src);
    StoreP(scratch_1, src);
    StoreP(scratch_0, dst, scratch_1);
  } else {
    // Both offsets are out of int16 range: temporarily spill one value to the
    // stack so that every LoadP/StoreP still has a scratch register available.
    LoadP(scratch_1, dst, scratch_0);
    push(scratch_1);
    LoadP(scratch_0, src, scratch_1);
    StoreP(scratch_0, dst, scratch_1);
    pop(scratch_1);
    StoreP(scratch_1, src, scratch_0);
  }
}

void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
                                 DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
                                 DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadSingle(src, dst, r0);
  StoreSingle(scratch, dst, r0);
}

void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
                                 DoubleRegister scratch_0,
                                 DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadSingle(scratch_0, src, r0);
  LoadSingle(scratch_1, dst, r0);
  StoreSingle(scratch_0, dst, r0);
  StoreSingle(scratch_1, src, r0);
}

void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
                                DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
                                DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadDouble(src, dst, r0);
  StoreDouble(scratch, dst, r0);
}

void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
                                DoubleRegister scratch_0,
                                DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadDouble(scratch_0, src, r0);
  LoadDouble(scratch_1, dst, r0);
  StoreDouble(scratch_0, dst, r0);
  StoreDouble(scratch_1, src, r0);
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  // All ones is the neutral poison mask: ANDing a value with it is a no-op.
  mov(kSpeculationPoisonRegister, Operand(-1));
}

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  beq(dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  blt(dest);
}

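// Usage sketch (illustrative only; |value| and the labels are assumed to be
// defined by the caller, e.g. when lowering a small switch):
//
//   JumpIfEqual(value, 0, &handle_zero);
//   JumpIfLessThan(value, 16, &handle_small);
//   b(&handle_large);
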
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC