// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_S390

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/codegen/s390/macro-assembler-s390.h"
#endif

namespace v8 {
namespace internal {

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kSystemPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kSystemPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushDoubles(kCallerSavedDoubles);
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopDoubles(kCallerSavedDoubles);
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kSystemPointerSize;

  return bytes;
}
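
// Usage sketch (illustrative, not part of the original file): the push/pop
// pair brackets code that may clobber JS caller-saved registers, and both
// return the byte count that RequiredStackSizeForCallerSaved predicts.
// Registers passed as exclusions are skipped, e.g. a register whose value
// is dead or will be overwritten by the call's result anyway:
//   int bytes = PushCallerSaved(kSaveFPRegs, r2);  // r2 intentionally skipped
//   /* ... emit the clobbering call ... */
//   bytes -= PopCallerSaved(kSaveFPRegs, r2);      // bytes is now zero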

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));

  const uint32_t offset = FixedArray::kHeaderSize +
                          constant_index * kSystemPointerSize - kHeapObjectTag;

  CHECK(is_uint19(offset));
  DCHECK_NE(destination, r0);
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  LoadTaggedPointerField(
      destination,
      FieldMemOperand(destination,
                      FixedArray::OffsetOfElementAt(constant_index)),
      r1);
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  LoadP(destination, MemOperand(kRootRegister, offset));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    LoadRR(destination, kRootRegister);
  } else if (is_uint12(offset)) {
    la(destination, MemOperand(kRootRegister, offset));
  } else {
    DCHECK(is_int20(offset));
    lay(destination, MemOperand(kRootRegister, offset));
  }
}

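// Encoding note for LoadRootRegisterOffset (an aid, assuming the standard
// z/Architecture forms): `la` encodes a 12-bit unsigned displacement
// (0..4095) while `lay` encodes a 20-bit signed one, so the helper picks the
// shortest instruction that can represent the offset. For example:
//   LoadRootRegisterOffset(r3, 0);     // LoadRR(r3, kRootRegister)
//   LoadRootRegisterOffset(r3, 4088);  // la: fits in 12 unsigned bits
//   LoadRootRegisterOffset(r3, -8);    // lay: needs the signed 20-bit form
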
void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  b(ip);

  bind(&skip);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
}

void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));

  int builtin_index = Builtins::kNoBuiltinId;
  bool target_is_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);

  if (options().inline_offheap_trampolines && target_is_builtin) {
    // Inline the trampoline.
    RecordCommentForOffHeapTrampoline(builtin_index);
    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
    EmbeddedData d = EmbeddedData::FromBlob();
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    b(cond, ip);
    return;
  }
  jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
}

void TurboAssembler::Jump(const ExternalReference& reference) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Move(scratch, reference);
  Jump(scratch);
}

void TurboAssembler::Call(Register target) {
  // Branch to target via indirect branch
  basr(r14, target);
}

void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target == r4);
  Call(target);
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  // S390 Assembler::move sequence is IILF / IIHF
  int size;
#if V8_TARGET_ARCH_S390X
  size = 14;  // IILF + IIHF + BASR
#else
  size = 8;  // IILF + BASR
#endif
  return size;
}
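
// Size arithmetic for the constants above (a sketch of the encoding, not
// taken from this file): IILF and IIHF are 6-byte RIL-format instructions
// and BASR is a 2-byte RR-format instruction, so the 64-bit sequence
// IILF + IIHF + BASR is 6 + 6 + 2 = 14 bytes, and the 31-bit sequence
// IILF + BASR is 6 + 2 = 8 bytes.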

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(cond == al);

  mov(ip, Operand(target, rmode));
  basr(r14, ip);
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);

  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));
  int builtin_index = Builtins::kNoBuiltinId;
  bool target_is_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);

  if (target_is_builtin && options().inline_offheap_trampolines) {
    // Inline the trampoline.
    RecordCommentForOffHeapTrampoline(builtin_index);
    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
    EmbeddedData d = EmbeddedData::FromBlob();
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    Call(ip);
    return;
  }
  DCHECK(code->IsExecutable());
  call(code, rmode);
}

void TurboAssembler::Drop(int count) {
  if (count > 0) {
    int total = count * kSystemPointerSize;
    if (is_uint12(total)) {
      la(sp, MemOperand(sp, total));
    } else if (is_int20(total)) {
      lay(sp, MemOperand(sp, total));
    } else {
      AddP(sp, Operand(total));
    }
  }
}
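
// Illustrative use of Drop (not from this file): discard two pushed
// temporaries by bumping sp past them.
//   push(r2);
//   push(r3);
//   Drop(2);  // sp += 2 * kSystemPointerSize; both values are discarded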

void TurboAssembler::Drop(Register count, Register scratch) {
  ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2));
  AddP(sp, sp, scratch);
}

void TurboAssembler::Call(Label* target) { b(r14, target); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void TurboAssembler::Push(Smi smi) {
  mov(r0, Operand(smi));
  push(r0);
}

void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
                          RelocInfo::Mode rmode) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadConstant(dst, value);
    return;
  } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
    EmbeddedObjectIndex index = AddEmbeddedObject(value);
    DCHECK(is_uint32(index));
    mov(dst, Operand(static_cast<int>(index), rmode));
  } else {
    DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
    mov(dst, Operand(value.address(), rmode));
  }
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadExternalReference(dst, reference);
    return;
  }
  mov(dst, Operand(reference));
}

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  if (dst != src) {
    if (cond == al) {
      LoadRR(dst, src);
    } else {
      LoadOnConditionP(cond, dst, src);
    }
  }
}

void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (dst != src) {
    ldr(dst, src);
  }
}

// Wrapper around Assembler::mvc (SS-a format)
void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
                              const Operand& length) {
  mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}

// Wrapper around Assembler::clc (SS-a format)
void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
                                        const MemOperand& opnd2,
                                        const Operand& length) {
  clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}

// Wrapper around Assembler::xc (SS-a format)
void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
                                     const MemOperand& opnd2,
                                     const Operand& length) {
  xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}

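// Length-encoding note for the three SS-a wrappers above: the hardware
// length field holds (length - 1), so callers pass the true byte count and
// the wrapper subtracts one. For example, copying one pointer-sized slot:
//   MoveChar(MemOperand(sp), MemOperand(scratch), Operand(kSystemPointerSize));
// emits an MVC whose encoded length field is kSystemPointerSize - 1.
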
// Wrapper around Assembler::risbg(n) (RIE-f)
void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
                                            const Operand& startBit,
                                            const Operand& endBit,
                                            const Operand& shiftAmt,
                                            bool zeroBits) {
  if (zeroBits)
    // High tag the top bit of I4/EndBit to zero out any unselected bits
    risbg(dst, src, startBit,
          Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
  else
    risbg(dst, src, startBit, endBit, shiftAmt);
}
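
// Illustrative use of RotateInsertSelectBits (not from this file): extract a
// bit field. With the ISA's big-endian bit numbering (bit 0 is the MSB),
// selecting bits 60..63 with a zero rotate and zeroBits == true leaves only
// the low nibble of src in dst and zeroes everything else:
//   RotateInsertSelectBits(dst, src, Operand(60), Operand(63),
//                          Operand::Zero(), true);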

void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
                                              Label* L) {
#if V8_TARGET_ARCH_S390X
  brxhg(dst, inc, L);
#else
  brxh(dst, inc, L);
#endif  // V8_TARGET_ARCH_S390X
}

void TurboAssembler::PushArray(Register array, Register size, Register scratch,
                               Register scratch2, PushArrayOrder order) {
  Label loop, done;

  if (order == kNormal) {
    ShiftLeftP(scratch, size, Operand(kSystemPointerSizeLog2));
    lay(scratch, MemOperand(array, scratch));
    bind(&loop);
    CmpP(array, scratch);
    bge(&done);
    lay(scratch, MemOperand(scratch, -kSystemPointerSize));
    lay(sp, MemOperand(sp, -kSystemPointerSize));
    MoveChar(MemOperand(sp), MemOperand(scratch), Operand(kSystemPointerSize));
    b(&loop);
    bind(&done);
  } else {
    DCHECK_NE(scratch2, r0);
    ShiftLeftP(scratch, size, Operand(kSystemPointerSizeLog2));
    lay(scratch, MemOperand(array, scratch));
    LoadRR(scratch2, array);
    bind(&loop);
    CmpP(scratch2, scratch);
    bge(&done);
    lay(sp, MemOperand(sp, -kSystemPointerSize));
    MoveChar(MemOperand(sp), MemOperand(scratch2), Operand(kSystemPointerSize));
    lay(scratch2, MemOperand(scratch2, kSystemPointerSize));
    b(&loop);
    bind(&done);
  }
}
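
// Order semantics, sketched for a three-element array {a0, a1, a2}: kNormal
// walks from the last element down, so a0 ends up at the lowest address (top
// of stack); the reverse order walks upward from the first element, leaving
// a2 on top.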

void TurboAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kSystemPointerSize;

  SubP(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kSystemPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kSystemPointerSize;
    }
  }
  AddP(location, location, Operand(stack_offset));
}

void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  SubP(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      StoreDouble(dreg, MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      LoadDouble(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  AddP(location, location, Operand(stack_offset));
}
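
// Layout note for the Multi* helpers above: MultiPush iterates from the
// highest register index down while decrementing the offset, so the
// lowest-numbered register in the set lands at the lowest address; MultiPop
// walks upward through the same layout, so a matching MultiPop(regs)
// restores exactly what MultiPush(regs) saved.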

void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition) {
  LoadP(destination,
        MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}

void TurboAssembler::LoadTaggedPointerField(const Register& destination,
                                            const MemOperand& field_operand,
                                            const Register& scratch) {
  if (COMPRESS_POINTERS_BOOL) {
    DecompressTaggedPointer(destination, field_operand);
  } else {
    LoadP(destination, field_operand, scratch);
  }
}

void TurboAssembler::LoadAnyTaggedField(const Register& destination,
                                        const MemOperand& field_operand,
                                        const Register& scratch) {
  if (COMPRESS_POINTERS_BOOL) {
    DecompressAnyTagged(destination, field_operand);
  } else {
    LoadP(destination, field_operand, scratch);
  }
}

void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
  if (SmiValuesAre31Bits()) {
    LoadW(dst, src);
  } else {
    LoadP(dst, src);
  }
  SmiUntag(dst);
}

void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
  SmiUntag(dst, src);
}

void TurboAssembler::StoreTaggedField(const Register& value,
                                      const MemOperand& dst_field_operand,
                                      const Register& scratch) {
  if (COMPRESS_POINTERS_BOOL) {
    RecordComment("[ StoreTagged");
    StoreW(value, dst_field_operand);
    RecordComment("]");
  } else {
    StoreP(value, dst_field_operand, scratch);
  }
}

void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            Register src) {
  RecordComment("[ DecompressTaggedSigned");
  llgfr(destination, src);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            MemOperand field_operand) {
  RecordComment("[ DecompressTaggedSigned");
  llgf(destination, field_operand);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedPointer(Register destination,
                                             Register source) {
  RecordComment("[ DecompressTaggedPointer");
  llgfr(destination, source);
  agr(destination, kRootRegister);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedPointer(Register destination,
                                             MemOperand field_operand) {
  RecordComment("[ DecompressTaggedPointer");
  llgf(destination, field_operand);
  agr(destination, kRootRegister);
  RecordComment("]");
}

void TurboAssembler::DecompressAnyTagged(Register destination,
                                         MemOperand field_operand) {
  RecordComment("[ DecompressAnyTagged");
  llgf(destination, field_operand);
  agr(destination, kRootRegister);
  RecordComment("]");
}

void TurboAssembler::DecompressAnyTagged(Register destination,
                                         Register source) {
  RecordComment("[ DecompressAnyTagged");
  llgfr(destination, source);
  agr(destination, kRootRegister);
  RecordComment("]");
}
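
// Note on the decompression helpers above: a compressed tagged pointer is a
// 32-bit value, so decompression zero-extends it (llgf/llgfr) and then
// rebases it by adding kRootRegister. Compressed Smis carry their payload in
// the value itself, which is why DecompressTaggedSigned only zero-extends.
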
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kTaggedSize.
  DCHECK(IsAligned(offset, kTaggedSize));

  lay(dst, MemOperand(object, offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    AndP(r0, dst, Operand(kTaggedSize - 1));
    beq(&ok, Label::kNear);
    stop();
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}

void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
                                             SaveFPRegsMode fp_mode) {
  EphemeronKeyBarrierDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
  Register slot_parameter(descriptor.GetRegisterParameter(
      EphemeronKeyBarrierDescriptor::kSlotAddress));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
       RelocInfo::CODE_TARGET);
  RestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. always emit remember set and save FP registers in RecordWriteStub. If
  // large performance regression is observed, we should use these values to
  // avoid unnecessary work.

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);
  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}

// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    LoadTaggedPointerField(r0, MemOperand(address));
    CmpP(value, r0);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
       !FLAG_incremental_marking) ||
      FLAG_disable_write_barriers) {
    return;
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(r14);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r14);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void TurboAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (marker_reg.is_valid()) {
    Push(r14, fp, marker_reg);
    fp_delta = 1;
  } else {
    Push(r14, fp);
    fp_delta = 0;
  }
  la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}

void TurboAssembler::PopCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Pop(r14, fp, marker_reg);
  } else {
    Pop(r14, fp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (function_reg.is_valid()) {
    Push(r14, fp, cp, function_reg);
    fp_delta = 2;
  } else {
    Push(r14, fp, cp);
    fp_delta = 1;
  }
  la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
  Push(kJavaScriptCallArgCountRegister);
}

void TurboAssembler::RestoreFrameStateForTailCall() {
  // if (FLAG_enable_embedded_constant_pool) {
  //   LoadP(kConstantPoolRegister,
  //         MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  //   set_constant_pool_available(false);
  // }
  DCHECK(!FLAG_enable_embedded_constant_pool);
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}

void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN
  if (dst != src) ldr(dst, src);
  lzdr(kDoubleRegZero);
  sdbr(dst, kDoubleRegZero);
}
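
// Why the subtraction above canonicalizes (a reasoning note, not from this
// file): any BFP arithmetic operation on a signaling NaN delivers the
// corresponding quiet NaN, while x - 0.0 leaves every non-NaN double
// (including -0.0 under round-to-nearest) unchanged, so subtracting
// kDoubleRegZero quietens NaNs without disturbing ordinary values.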

void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
  cdfbr(dst, src);
}

void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
                                                Register src) {
  if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
    cdlfbr(Condition(5), Condition(0), dst, src);
  } else {
    // zero-extend src
    llgfr(src, src);
    // convert to double
    cdgbr(dst, src);
  }
}

void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
  cefbra(Condition(4), dst, src);
}

void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
                                               Register src) {
  celfbr(Condition(4), Condition(0), dst, src);
}

void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
                                         Register src) {
  cegbr(double_dst, src);
}

void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
                                          Register src) {
  cdgbr(double_dst, src);
}

void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
                                                 Register src) {
  celgbr(Condition(0), Condition(0), double_dst, src);
}

void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
                                                  Register src) {
  cdlgbr(Condition(0), Condition(0), double_dst, src);
}

void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
                                           const DoubleRegister double_input,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgebr(m, dst, double_input);
}

void TurboAssembler::ConvertDoubleToInt64(const Register dst,
                                          const DoubleRegister double_input,
                                          FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgdbr(m, dst, double_input);
}
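
// Mask reference for the rounding-mode switches above and below (stated as
// an aid, following the z/Architecture BFP M3 field): Condition(4) rounds to
// nearest with ties to even, Condition(5) rounds toward zero, Condition(6)
// toward +infinity, and Condition(7) toward -infinity. Only the signed
// 32-bit conversions here implement kRoundToNearest; the others hit
// UNIMPLEMENTED().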

void TurboAssembler::ConvertDoubleToInt32(const Register dst,
                                          const DoubleRegister double_input,
                                          FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      m = Condition(4);
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(dst, Operand::Zero());
#endif
  cfdbr(m, dst, double_input);
}

void TurboAssembler::ConvertFloat32ToInt32(const Register result,
                                           const DoubleRegister double_input,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      m = Condition(4);
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(result, Operand::Zero());
#endif
  cfebr(m, result, double_input);
}

void TurboAssembler::ConvertFloat32ToUnsignedInt32(
    const Register result, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(result, Operand::Zero());
#endif
  clfebr(m, Condition(0), result, double_input);
}

void TurboAssembler::ConvertFloat32ToUnsignedInt64(
    const Register result, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgebr(m, Condition(0), result, double_input);
}

void TurboAssembler::ConvertDoubleToUnsignedInt64(
    const Register dst, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgdbr(m, Condition(0), dst, double_input);
}

void TurboAssembler::ConvertDoubleToUnsignedInt32(
    const Register dst, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(dst, Operand::Zero());
#endif
  clfdbr(m, Condition(0), dst, double_input);
}

#if !V8_TARGET_ARCH_S390X
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}
#endif

void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
  lgdr(dst, src);
}

void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
  ldgr(dst, src);
}

void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
                                  int prologue_offset) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    Load(r1, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r1);
  }
}

void TurboAssembler::Prologue(Register base, int prologue_offset) {
  DCHECK(base != no_reg);
  PushStandardFrame(r3);
}

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // We create a stack frame with:
  //    Return Addr <-- old sp
  //    Old FP      <-- new fp
  //    CP
  //    type
  //    CodeObject  <-- new sp

  Load(ip, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(ip);
}

int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer.
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  if (is_int20(StandardFrameConstants::kCallerSPOffset + stack_adjustment)) {
    lay(r1, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                               stack_adjustment));
  } else {
    AddP(r1, fp,
         Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
  }
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  LoadRR(sp, r1);
  int frame_ends = pc_offset();
  return frame_ends;
}

// ExitFrame layout (probably wrongish.. needs updating)
//
//  SP -> previousSP
//        LK reserved
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around.. so first
// we reserve a slot for LK and push the previous SP which is captured
// in the fp register (r11)
// Then - we buy a new frame

// r14
// oldFP <- newFP
// SP
// Floats
// gaps
// Args
// ABIRes <- newSP
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK_GT(stack_space, 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code
  CleanseP(r14);
  Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(r1);
  // Reserve room for saved entry sp.
  lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
  }

  // Save the frame pointer and the context in top.
  Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(fp, MemOperand(r1));
  Move(r1,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(cp, MemOperand(r1));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
  if (frame_alignment > 0) {
    DCHECK_EQ(frame_alignment, 8);
    ClearRightImm(sp, sp, Operand(3));  // equivalent to &= -8
  }

  lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
  StoreP(MemOperand(sp), Operand::Zero(), r0);
  // Set the exit frame sp value to point just before the return address
  // location.
  lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize));
  StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one S390
  // platform for another S390 platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
                             kNumRegs * kDoubleSize)));
    MultiPopDoubles(kCallerSavedDoubles, r5);
  }

  // Clear top frame.
  Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);

  // Restore current context from top and clear it in debug mode.
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  LoadP(cp, MemOperand(ip));

#ifdef DEBUG
  mov(r1, Operand(Context::kInvalidContext));
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(r1, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftP(argument_count, argument_count,
                 Operand(kSystemPointerSizeLog2));
    }
    la(sp, MemOperand(sp, argument_count));
  }
}

void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d0);
}

void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d0);
}

void TurboAssembler::PrepareForTailCall(Register callee_args_count,
                                        Register caller_args_count,
                                        Register scratch0, Register scratch1) {
  DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We AddP kSystemPointerSize to count the
  // receiver argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftP(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
  AddP(dst_reg, fp, dst_reg);
  AddP(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));

  Register src_reg = caller_args_count;
  // Calculate the end of source area. +kSystemPointerSize is for the receiver.
  ShiftLeftP(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
  AddP(src_reg, sp, src_reg);
  AddP(src_reg, src_reg, Operand(kSystemPointerSize));

  if (FLAG_debug_code) {
    CmpLogicalP(src_reg, dst_reg);
    Check(lt, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  AddP(tmp_reg, callee_args_count, Operand(1));  // +1 for receiver
  LoadRR(r1, tmp_reg);
  bind(&loop);
  LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
  StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
  lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
  lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
  BranchOnCount(r1, &loop);

  // Leave current frame.
  LoadRR(sp, dst_reg);
}
1361 
InvokePrologue(Register expected_parameter_count,Register actual_parameter_count,Label * done,InvokeFlag flag)1362 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1363                                     Register actual_parameter_count,
1364                                     Label* done, InvokeFlag flag) {
1365   Label regular_invoke;
1366 
1367   // Check whether the expected and actual arguments count match. If not,
1368   // setup registers according to contract with ArgumentsAdaptorTrampoline:
1369   //  r2: actual arguments count
1370   //  r3: function (passed through to callee)
1371   //  r4: expected arguments count
1372 
1373   // The code below is made a lot easier because the calling code already sets
1374   // up actual and expected registers according to the contract.
1375   // ARM has some checks as per below, considering add them for S390
1376   DCHECK_EQ(actual_parameter_count, r2);
1377   DCHECK_EQ(expected_parameter_count, r4);
1378 
1379   CmpP(expected_parameter_count, actual_parameter_count);
1380   beq(&regular_invoke);
1381 
1382   Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1383   if (flag == CALL_FUNCTION) {
1384     Call(adaptor);
1385     b(done);
1386   } else {
1387     Jump(adaptor, RelocInfo::CODE_TARGET);
1388   }
1389   bind(&regular_invoke);
1390 }
1391 
1392 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1393                                     Register expected_parameter_count,
1394                                     Register actual_parameter_count) {
1395   Label skip_hook;
1396 
1397   ExternalReference debug_hook_active =
1398       ExternalReference::debug_hook_on_function_call_address(isolate());
1399   Move(r6, debug_hook_active);
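  // Test the hook flag byte under mask; tm() sets CC to "all bits zero" (eq)
  // when the hook is inactive, so beq skips the debug call below.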
1400   tm(MemOperand(r6), Operand(0xFF));
1401   beq(&skip_hook);
1402 
1403   {
1404     // Load receiver to pass it later to DebugOnFunctionCall hook.
1405     LoadReceiver(r6, actual_parameter_count);
1406     FrameScope frame(this,
1407                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1408 
1409     SmiTag(expected_parameter_count);
1410     Push(expected_parameter_count);
1411 
1412     SmiTag(actual_parameter_count);
1413     Push(actual_parameter_count);
1414 
1415     if (new_target.is_valid()) {
1416       Push(new_target);
1417     }
1418     Push(fun, fun, r6);
1419     CallRuntime(Runtime::kDebugOnFunctionCall);
1420     Pop(fun);
1421     if (new_target.is_valid()) {
1422       Pop(new_target);
1423     }
1424 
1425     Pop(actual_parameter_count);
1426     SmiUntag(actual_parameter_count);
1427 
1428     Pop(expected_parameter_count);
1429     SmiUntag(expected_parameter_count);
1430   }
1431   bind(&skip_hook);
1432 }
1433 
1434 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1435                                         Register expected_parameter_count,
1436                                         Register actual_parameter_count,
1437                                         InvokeFlag flag) {
1438   // You can't call a function without a valid frame.
1439   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1440   DCHECK_EQ(function, r3);
1441   DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
1442 
1443   // On function call, call into the debugger if necessary.
1444   CheckDebugHook(function, new_target, expected_parameter_count,
1445                  actual_parameter_count);
1446 
1447   // Clear the new.target register if not given.
1448   if (!new_target.is_valid()) {
1449     LoadRoot(r5, RootIndex::kUndefinedValue);
1450   }
1451 
1452   Label done;
1453   InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
1454   // We call indirectly through the code field in the function to
1455   // allow recompilation to take effect without changing any of the
1456   // call sites.
1457   Register code = kJavaScriptCallCodeStartRegister;
1458   LoadTaggedPointerField(code,
1459                          FieldMemOperand(function, JSFunction::kCodeOffset));
1460   if (flag == CALL_FUNCTION) {
1461     CallCodeObject(code);
1462   } else {
1463     DCHECK(flag == JUMP_FUNCTION);
1464     JumpCodeObject(code);
1465   }
1466   // Continue here if InvokePrologue handled the invocation itself, i.e. the
1467   // arguments adaptor was called because the parameter counts mismatched.
1468   bind(&done);
1469 }
1470 
1471 void MacroAssembler::InvokeFunctionWithNewTarget(
1472     Register fun, Register new_target, Register actual_parameter_count,
1473     InvokeFlag flag) {
1474   // You can't call a function without a valid frame.
1475   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1476 
1477   // Contract with called JS functions requires that function is passed in r3.
1478   DCHECK_EQ(fun, r3);
1479 
1480   Register expected_reg = r4;
1481   Register temp_reg = r6;
1482   LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
1483   LoadTaggedPointerField(
1484       temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
1485   LoadLogicalHalfWordP(
1486       expected_reg,
1487       FieldMemOperand(temp_reg,
1488                       SharedFunctionInfo::kFormalParameterCountOffset));
1489 
1490   InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1491                      flag);
1492 }
1493 
1494 void MacroAssembler::InvokeFunction(Register function,
1495                                     Register expected_parameter_count,
1496                                     Register actual_parameter_count,
1497                                     InvokeFlag flag) {
1498   // You can't call a function without a valid frame.
1499   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1500 
1501   // Contract with called JS functions requires that function is passed in r3.
1502   DCHECK_EQ(function, r3);
1503 
1504   // Get the function and set up the context.
1505   LoadTaggedPointerField(cp,
1506                          FieldMemOperand(function, JSFunction::kContextOffset));
1507 
1508   InvokeFunctionCode(r3, no_reg, expected_parameter_count,
1509                      actual_parameter_count, flag);
1510 }
1511 
1512 void MacroAssembler::MaybeDropFrames() {
1513   // Check whether we need to drop frames to restart a function on the stack.
1514   ExternalReference restart_fp =
1515       ExternalReference::debug_restart_fp_address(isolate());
1516   Move(r3, restart_fp);
1517   LoadP(r3, MemOperand(r3));
1518   CmpP(r3, Operand::Zero());
1519   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1520        ne);
1521 }
1522 
1523 void MacroAssembler::PushStackHandler() {
1524   // Adjust this code if not the case.
1525   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1526   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
1527 
1528   // Link the current handler as the next handler.
1529   Move(r7,
1530        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1531 
1532   // Buy the full stack frame (StackHandlerConstants::kSize, i.e. two slots).
1533   lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
1534 
1535   // Store padding.
1536   lghi(r0, Operand::Zero());
1537   StoreP(r0, MemOperand(sp));  // Padding.
1538 
1539   // Copy the old handler into the next handler slot.
1540   MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
1541            Operand(kSystemPointerSize));
1542   // Set this new handler as the current one.
1543   StoreP(sp, MemOperand(r7));
1544 }
1545 
1546 void MacroAssembler::PopStackHandler() {
1547   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1548   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1549 
1550   // Pop the next handler into r3 and store it in the handler address slot.
1551   Pop(r3);
1552   Move(ip,
1553        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1554   StoreP(r3, MemOperand(ip));
1555 
1556   Drop(1);  // Drop padding.
1557 }
1558 
1559 void MacroAssembler::CompareObjectType(Register object, Register map,
1560                                        Register type_reg, InstanceType type) {
1561   const Register temp = type_reg == no_reg ? r0 : type_reg;
1562 
1563   LoadMap(map, object);
1564   CompareInstanceType(map, temp, type);
1565 }
1566 
1567 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1568                                          InstanceType type) {
1569   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1570   STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1571   LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1572   CmpP(type_reg, Operand(type));
1573 }
1574 
1575 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1576   int32_t offset = RootRegisterOffsetForRootIndex(index);
1577 #ifdef V8_TARGET_BIG_ENDIAN
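  // With pointer compression, the 32-bit tagged value occupies the low-order
  // half of the 64-bit root slot, which on big-endian targets is the half at
  // the higher address, hence the kTaggedSize bias.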
1578   offset += (COMPRESS_POINTERS_BOOL ? kTaggedSize : 0);
1579 #endif
1580   CompareTagged(obj, MemOperand(kRootRegister, offset));
1581 }
1582 
1583 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1584                                      unsigned higher_limit,
1585                                      Label* on_in_range) {
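  // Unsigned range trick: value is in [lower_limit, higher_limit] exactly when
  // (value - lower_limit) <= (higher_limit - lower_limit) as an unsigned
  // comparison.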
1586   if (lower_limit != 0) {
1587     Register scratch = r0;
1588     LoadRR(scratch, value);
1589     slgfi(scratch, Operand(lower_limit));
1590     CmpLogicalP(scratch, Operand(higher_limit - lower_limit));
1591   } else {
1592     CmpLogicalP(value, Operand(higher_limit));
1593   }
1594   ble(on_in_range);
1595 }
1596 
1597 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1598                                        Register result,
1599                                        DoubleRegister double_input,
1600                                        StubCallMode stub_mode) {
1601   Label done;
1602 
1603   TryInlineTruncateDoubleToI(result, double_input, &done);
1604 
1605   // If we fell through, the inline version didn't succeed; call the stub.
1606   push(r14);
1607   // Put input on stack.
1608   lay(sp, MemOperand(sp, -kDoubleSize));
1609   StoreDouble(double_input, MemOperand(sp));
1610 
1611   if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1612     Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1613   } else {
1614     Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1615   }
1616 
1617   LoadP(result, MemOperand(sp, 0));
1618   la(sp, MemOperand(sp, kDoubleSize));
1619   pop(r14);
1620 
1621   bind(&done);
1622 }
1623 
1624 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1625                                                 DoubleRegister double_input,
1626                                                 Label* done) {
1627   ConvertDoubleToInt64(result, double_input);
1628 
1629   // Test for overflow
1630   TestIfInt32(result);
1631   beq(done);
1632 }
1633 
1634 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1635                                  SaveFPRegsMode save_doubles) {
1636   // All parameters are on the stack.  r2 has the return value after call.
1637 
1638   // If the expected number of arguments of the runtime function is
1639   // constant, we check that the actual number of arguments matches the
1640   // expectation.
1641   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1642 
1643   // TODO(1236192): Most runtime routines don't need the number of
1644   // arguments passed in because it is constant. At some point we
1645   // should remove this need and make the runtime routine entry code
1646   // smarter.
1647   mov(r2, Operand(num_arguments));
1648   Move(r3, ExternalReference::Create(f));
1649 #if V8_TARGET_ARCH_S390X
1650   Handle<Code> code =
1651       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1652 #else
1653   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1654 #endif
1655 
1656   Call(code, RelocInfo::CODE_TARGET);
1657 }
1658 
1659 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1660   const Runtime::Function* function = Runtime::FunctionForId(fid);
1661   DCHECK_EQ(1, function->result_size);
1662   if (function->nargs >= 0) {
1663     mov(r2, Operand(function->nargs));
1664   }
1665   JumpToExternalReference(ExternalReference::Create(fid));
1666 }
1667 
1668 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1669                                              bool builtin_exit_frame) {
1670   Move(r3, builtin);
1671   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1672                                           kArgvOnStack, builtin_exit_frame);
1673   Jump(code, RelocInfo::CODE_TARGET);
1674 }
1675 
1676 void MacroAssembler::JumpToInstructionStream(Address entry) {
1677   mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1678   Jump(kOffHeapTrampolineRegister);
1679 }
1680 
1681 void MacroAssembler::LoadWeakValue(Register out, Register in,
1682                                    Label* target_if_cleared) {
1683   Cmp32(in, Operand(kClearedWeakHeapObjectLower32));
1684   beq(target_if_cleared);
1685 
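  // Clear the weak-reference tag bit to recover a strong pointer to the
  // target heap object.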
1686   AndP(out, in, Operand(~kWeakHeapObjectMask));
1687 }
1688 
1689 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1690                                       Register scratch1, Register scratch2) {
1691   DCHECK(value > 0 && is_int8(value));
1692   if (FLAG_native_code_counters && counter->Enabled()) {
1693     Move(scratch2, ExternalReference::Create(counter));
1694     // TODO(john.yan): This can be optimized by using asi().
1695     LoadW(scratch1, MemOperand(scratch2));
1696     AddP(scratch1, Operand(value));
1697     StoreW(scratch1, MemOperand(scratch2));
1698   }
1699 }
1700 
1701 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1702                                       Register scratch1, Register scratch2) {
1703   DCHECK(value > 0 && is_int8(value));
1704   if (FLAG_native_code_counters && counter->Enabled()) {
1705     Move(scratch2, ExternalReference::Create(counter));
1706     // TODO(john.yan): This can be optimized by using asi().
1707     LoadW(scratch1, MemOperand(scratch2));
1708     AddP(scratch1, Operand(-value));
1709     StoreW(scratch1, MemOperand(scratch2));
1710   }
1711 }
1712 
1713 void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
1714   if (emit_debug_code()) Check(cond, reason, cr);
1715 }
1716 
1717 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1718   Label L;
1719   b(cond, &L);
1720   Abort(reason);
1721   // will not return here
1722   bind(&L);
1723 }
1724 
1725 void TurboAssembler::Abort(AbortReason reason) {
1726   Label abort_start;
1727   bind(&abort_start);
1728 #ifdef DEBUG
1729   const char* msg = GetAbortReason(reason);
1730   RecordComment("Abort message: ");
1731   RecordComment(msg);
1732 #endif
1733 
1734   // Avoid emitting call to builtin if requested.
1735   if (trap_on_abort()) {
1736     stop();
1737     return;
1738   }
1739 
1740   if (should_abort_hard()) {
1741     // We don't care if we constructed a frame. Just pretend we did.
1742     FrameScope assume_frame(this, StackFrame::NONE);
1743     lgfi(r2, Operand(static_cast<int>(reason)));
1744     PrepareCallCFunction(1, 0, r3);
1745     Move(r3, ExternalReference::abort_with_reason());
1746     // Use Call directly to avoid any unneeded overhead. The function won't
1747     // return anyway.
1748     Call(r3);
1749     return;
1750   }
1751 
1752   LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
1753 
1754   // Disable stub call restrictions to always allow calls to abort.
1755   if (!has_frame_) {
1756     // We don't actually want to generate a pile of code for this, so just
1757     // claim there is a stack frame, without generating one.
1758     FrameScope scope(this, StackFrame::NONE);
1759     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1760   } else {
1761     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1762   }
1763   // will not return here
1764 }
1765 
1766 void MacroAssembler::LoadMap(Register destination, Register object) {
1767   LoadTaggedPointerField(destination,
1768                          FieldMemOperand(object, HeapObject::kMapOffset));
1769 }
1770 
1771 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1772   LoadMap(dst, cp);
1773   LoadTaggedPointerField(
1774       dst, FieldMemOperand(
1775                dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
1776   LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
1777 }
1778 
1779 void MacroAssembler::AssertNotSmi(Register object) {
1780   if (emit_debug_code()) {
1781     STATIC_ASSERT(kSmiTag == 0);
1782     TestIfSmi(object);
1783     Check(ne, AbortReason::kOperandIsASmi, cr0);
1784   }
1785 }
1786 
1787 void MacroAssembler::AssertSmi(Register object) {
1788   if (emit_debug_code()) {
1789     STATIC_ASSERT(kSmiTag == 0);
1790     TestIfSmi(object);
1791     Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1792   }
1793 }
1794 
1795 void MacroAssembler::AssertConstructor(Register object, Register scratch) {
1796   if (emit_debug_code()) {
1797     STATIC_ASSERT(kSmiTag == 0);
1798     TestIfSmi(object);
1799     Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
1800     LoadMap(scratch, object);
1801     tm(FieldMemOperand(scratch, Map::kBitFieldOffset),
1802        Operand(Map::Bits1::IsConstructorBit::kMask));
1803     Check(ne, AbortReason::kOperandIsNotAConstructor);
1804   }
1805 }
1806 
1807 void MacroAssembler::AssertFunction(Register object) {
1808   if (emit_debug_code()) {
1809     STATIC_ASSERT(kSmiTag == 0);
1810     TestIfSmi(object);
1811     Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1812     push(object);
1813     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1814     pop(object);
1815     Check(eq, AbortReason::kOperandIsNotAFunction);
1816   }
1817 }
1818 
1819 void MacroAssembler::AssertBoundFunction(Register object) {
1820   if (emit_debug_code()) {
1821     STATIC_ASSERT(kSmiTag == 0);
1822     TestIfSmi(object);
1823     Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1824     push(object);
1825     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1826     pop(object);
1827     Check(eq, AbortReason::kOperandIsNotABoundFunction);
1828   }
1829 }
1830 
1831 void MacroAssembler::AssertGeneratorObject(Register object) {
1832   if (!emit_debug_code()) return;
1833   TestIfSmi(object);
1834   Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1835 
1836   // Load map
1837   Register map = object;
1838   push(object);
1839   LoadMap(map, object);
1840 
1841   // Check if JSGeneratorObject
1842   Label do_check;
1843   Register instance_type = object;
1844   CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1845   beq(&do_check);
1846 
1847   // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
1848   CmpP(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
1849   beq(&do_check);
1850 
1851   // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1852   CmpP(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1853 
1854   bind(&do_check);
1855   // Restore generator object to register and perform assertion
1856   pop(object);
1857   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1858 }
1859 
1860 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1861                                                      Register scratch) {
1862   if (emit_debug_code()) {
1863     Label done_checking;
1864     AssertNotSmi(object);
1865     CompareRoot(object, RootIndex::kUndefinedValue);
1866     beq(&done_checking, Label::kNear);
1867     LoadMap(scratch, object);
1868     CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1869     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1870     bind(&done_checking);
1871   }
1872 }
1873 
1874 static const int kRegisterPassedArguments = 5;
1875 
1876 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1877                                               int num_double_arguments) {
1878   int stack_passed_words = 0;
1879   if (num_double_arguments > DoubleRegister::kNumRegisters) {
1880     stack_passed_words +=
1881         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1882   }
1883   // Up to five simple arguments are passed in registers r2..r6
1884   if (num_reg_arguments > kRegisterPassedArguments) {
1885     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1886   }
1887   return stack_passed_words;
1888 }
1889 
1890 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1891                                           int num_double_arguments,
1892                                           Register scratch) {
1893   int frame_alignment = ActivationFrameAlignment();
1894   int stack_passed_arguments =
1895       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1896   int stack_space = kNumRequiredStackFrameSlots;
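  // kNumRequiredStackFrameSlots covers the ABI-required caller frame (the
  // register save area on zLinux); stack-passed arguments are added on top.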
1897   if (frame_alignment > kSystemPointerSize) {
1898     // Make stack end at alignment and make room for stack arguments
1899     // -- preserving original value of sp.
1900     LoadRR(scratch, sp);
1901     lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
1902     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1903     ClearRightImm(sp, sp,
1904                   Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
1905     StoreP(scratch,
1906            MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
1907   } else {
1908     stack_space += stack_passed_arguments;
1909   }
1910   lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
1911 }
1912 
1913 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1914                                           Register scratch) {
1915   PrepareCallCFunction(num_reg_arguments, 0, scratch);
1916 }
1917 
1918 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
1919 
1920 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
1921 
1922 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
1923                                           DoubleRegister src2) {
1924   if (src2 == d0) {
1925     DCHECK(src1 != d2);
1926     Move(d2, src2);
1927     Move(d0, src1);
1928   } else {
1929     Move(d0, src1);
1930     Move(d2, src2);
1931   }
1932 }
1933 
1934 void TurboAssembler::CallCFunction(ExternalReference function,
1935                                    int num_reg_arguments,
1936                                    int num_double_arguments) {
1937   Move(ip, function);
1938   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
1939 }
1940 
1941 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
1942                                    int num_double_arguments) {
1943   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
1944 }
1945 
1946 void TurboAssembler::CallCFunction(ExternalReference function,
1947                                    int num_arguments) {
1948   CallCFunction(function, num_arguments, 0);
1949 }
1950 
1951 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
1952   CallCFunction(function, num_arguments, 0);
1953 }
1954 
1955 void TurboAssembler::CallCFunctionHelper(Register function,
1956                                          int num_reg_arguments,
1957                                          int num_double_arguments) {
1958   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
1959   DCHECK(has_frame());
1960 
1961   // Save the frame pointer and PC so that the stack layout remains iterable,
1962   // even without an ExitFrame which normally exists between JS and C frames.
1963   Register addr_scratch = r1;
1964   // See x64 code for reasoning about how to address the isolate data fields.
1965   if (root_array_available()) {
1966     LoadPC(r0);
1967     StoreP(r0, MemOperand(kRootRegister,
1968                           IsolateData::fast_c_call_caller_pc_offset()));
1969     StoreP(fp, MemOperand(kRootRegister,
1970                           IsolateData::fast_c_call_caller_fp_offset()));
1971   } else {
1972     DCHECK_NOT_NULL(isolate());
1973 
1974     Move(addr_scratch,
1975          ExternalReference::fast_c_call_caller_pc_address(isolate()));
1976     LoadPC(r0);
1977     StoreP(r0, MemOperand(addr_scratch));
1978     Move(addr_scratch,
1979          ExternalReference::fast_c_call_caller_fp_address(isolate()));
1980     StoreP(fp, MemOperand(addr_scratch));
1981   }
1982 
1983   // Just call directly. The function called cannot cause a GC, or
1984   // allow preemption, so the return address in the link register
1985   // stays correct.
1986   Register dest = function;
1987   if (ABI_CALL_VIA_IP) {
1988     Move(ip, function);
1989     dest = ip;
1990   }
1991 
1992   Call(dest);
1993 
1994   // We don't unset the PC; the FP is the source of truth.
1995   Register zero_scratch = r0;
1996   lghi(zero_scratch, Operand::Zero());
1997 
1998   if (root_array_available()) {
1999     StoreP(
2000         zero_scratch,
2001         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2002   } else {
2003     DCHECK_NOT_NULL(isolate());
2004     Move(addr_scratch,
2005          ExternalReference::fast_c_call_caller_fp_address(isolate()));
2006     StoreP(zero_scratch, MemOperand(addr_scratch));
2007   }
2008 
2009   int stack_passed_arguments =
2010       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2011   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2012   if (ActivationFrameAlignment() > kSystemPointerSize) {
2013     // Load the original stack pointer (pre-alignment) from the stack
2014     LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
2015   } else {
2016     la(sp, MemOperand(sp, stack_space * kSystemPointerSize));
2017   }
2018 }
2019 
2020 void TurboAssembler::CheckPageFlag(
2021     Register object,
2022     Register scratch,  // scratch may be same register as object
2023     int mask, Condition cc, Label* condition_met) {
2024   DCHECK(cc == ne || cc == eq);
2025   ClearRightImm(scratch, object, Operand(kPageSizeBits));
2026 
2027   if (base::bits::IsPowerOfTwo(mask)) {
2028     // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
2029     // which allows testing of a single byte in memory.
2030     int32_t byte_offset = 4;
2031     uint32_t shifted_mask = mask;
2032     // Determine the byte offset to be tested
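    // The flags field is kSystemPointerSize bytes wide and byte-addressed in
    // big-endian order: the least-significant byte (mask bits 0-7) is at
    // offset kSystemPointerSize - 1, bits 8-15 one byte before it, and so on.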
2033     if (mask <= 0x80) {
2034       byte_offset = kSystemPointerSize - 1;
2035     } else if (mask <= 0x8000) {
2036       byte_offset = kSystemPointerSize - 2;
2037       shifted_mask = mask >> 8;
2038     } else if (mask <= 0x800000) {
2039       byte_offset = kSystemPointerSize - 3;
2040       shifted_mask = mask >> 16;
2041     } else {
2042       byte_offset = kSystemPointerSize - 4;
2043       shifted_mask = mask >> 24;
2044     }
2045 #if V8_TARGET_LITTLE_ENDIAN
2046     // Reverse the byte offset when emulating on a little-endian platform.
2047     byte_offset = kSystemPointerSize - byte_offset - 1;
2048 #endif
2049     tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
2050        Operand(shifted_mask));
2051   } else {
2052     LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
2053     AndP(r0, scratch, Operand(mask));
2054   }
2056 
2057   if (cc == ne) {
2058     bne(condition_met);
2059   }
2060   if (cc == eq) {
2061     beq(condition_met);
2062   }
2063 }
2064 
2065 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2066                                    Register reg4, Register reg5,
2067                                    Register reg6) {
2068   RegList regs = 0;
2069   if (reg1.is_valid()) regs |= reg1.bit();
2070   if (reg2.is_valid()) regs |= reg2.bit();
2071   if (reg3.is_valid()) regs |= reg3.bit();
2072   if (reg4.is_valid()) regs |= reg4.bit();
2073   if (reg5.is_valid()) regs |= reg5.bit();
2074   if (reg6.is_valid()) regs |= reg6.bit();
2075 
2076   const RegisterConfiguration* config = RegisterConfiguration::Default();
2077   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2078     int code = config->GetAllocatableGeneralCode(i);
2079     Register candidate = Register::from_code(code);
2080     if (regs & candidate.bit()) continue;
2081     return candidate;
2082   }
2083   UNREACHABLE();
2084 }
2085 
2086 void TurboAssembler::mov(Register dst, const Operand& src) {
2087 #if V8_TARGET_ARCH_S390X
2088   int64_t value;
2089 #else
2090   int value;
2091 #endif
2092   if (src.is_heap_object_request()) {
2093     RequestHeapObject(src.heap_object_request());
2094     value = 0;
2095   } else {
2096     value = src.immediate();
2097   }
2098 
2099   if (src.rmode() != RelocInfo::NONE) {
2100     // Some form of relocation is needed.
2101     RecordRelocInfo(src.rmode(), value);
2102   }
2103 
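  // On 64-bit targets, materialize the immediate in two halves: iihf inserts
  // bits 32-63 and iilf inserts bits 0-31 of dst.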
2104 #if V8_TARGET_ARCH_S390X
2105   int32_t hi_32 = static_cast<int64_t>(value) >> 32;
2106   int32_t lo_32 = static_cast<int32_t>(value);
2107 
2108   iihf(dst, Operand(hi_32));
2109   iilf(dst, Operand(lo_32));
2110 #else
2111   iilf(dst, Operand(value));
2112 #endif
2113 }
2114 
2115 void TurboAssembler::Mul32(Register dst, const MemOperand& src1) {
2116   if (is_uint12(src1.offset())) {
2117     ms(dst, src1);
2118   } else if (is_int20(src1.offset())) {
2119     msy(dst, src1);
2120   } else {
2121     UNIMPLEMENTED();
2122   }
2123 }
2124 
2125 void TurboAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
2126 
2127 void TurboAssembler::Mul32(Register dst, const Operand& src1) {
2128   msfi(dst, src1);
2129 }
2130 
2131 #define Generate_MulHigh32(instr) \
2132   {                               \
2133     lgfr(dst, src1);              \
2134     instr(dst, src2);             \
2135     srlg(dst, dst, Operand(32));  \
2136   }
2137 
2138 void TurboAssembler::MulHigh32(Register dst, Register src1,
2139                                const MemOperand& src2) {
2140   Generate_MulHigh32(msgf);
2141 }
2142 
2143 void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
2144   if (dst == src2) {
2145     std::swap(src1, src2);
2146   }
2147   Generate_MulHigh32(msgfr);
2148 }
2149 
2150 void TurboAssembler::MulHigh32(Register dst, Register src1,
2151                                const Operand& src2) {
2152   Generate_MulHigh32(msgfi);
2153 }
2154 
2155 #undef Generate_MulHigh32
2156 
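// The unsigned high multiply uses the even/odd register pair r0:r1: ml/mlr
// multiply the contents of r1 by the second operand and leave the 64-bit
// product in r0:r1, with the high word in r0.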
2157 #define Generate_MulHighU32(instr) \
2158   {                                \
2159     lr(r1, src1);                  \
2160     instr(r0, src2);               \
2161     LoadlW(dst, r0);               \
2162   }
2163 
2164 void TurboAssembler::MulHighU32(Register dst, Register src1,
2165                                 const MemOperand& src2) {
2166   Generate_MulHighU32(ml);
2167 }
2168 
2169 void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
2170   Generate_MulHighU32(mlr);
2171 }
2172 
2173 void TurboAssembler::MulHighU32(Register dst, Register src1,
2174                                 const Operand& src2) {
2175   USE(dst);
2176   USE(src1);
2177   USE(src2);
2178   UNREACHABLE();
2179 }
2180 
2181 #undef Generate_MulHighU32
2182 
2183 #define Generate_Mul32WithOverflowIfCCUnequal(instr) \
2184   {                                                  \
2185     lgfr(dst, src1);                                 \
2186     instr(dst, src2);                                \
2187     cgfr(dst, dst);                                  \
2188   }
2189 
2190 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2191                                                   const MemOperand& src2) {
2192   Register result = dst;
2193   if (src2.rx() == dst || src2.rb() == dst) dst = r0;
2194   Generate_Mul32WithOverflowIfCCUnequal(msgf);
2195   if (result != dst) llgfr(result, dst);
2196 }
2197 
2198 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2199                                                   Register src2) {
2200   if (dst == src2) {
2201     std::swap(src1, src2);
2202   }
2203   Generate_Mul32WithOverflowIfCCUnequal(msgfr);
2204 }
2205 
2206 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2207                                                   const Operand& src2) {
2208   Generate_Mul32WithOverflowIfCCUnequal(msgfi);
2209 }
2210 
2211 #undef Generate_Mul32WithOverflowIfCCUnequal
2212 
2213 void TurboAssembler::Mul64(Register dst, const MemOperand& src1) {
2214   if (is_int20(src1.offset())) {
2215     msg(dst, src1);
2216   } else {
2217     UNIMPLEMENTED();
2218   }
2219 }
2220 
2221 void TurboAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
2222 
2223 void TurboAssembler::Mul64(Register dst, const Operand& src1) {
2224   msgfi(dst, src1);
2225 }
2226 
2227 void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
2228   if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
2229     MulPWithCondition(dst, src1, src2);
2230   } else {
2231     if (dst == src2) {
2232       MulP(dst, src1);
2233     } else if (dst == src1) {
2234       MulP(dst, src2);
2235     } else {
2236       Move(dst, src1);
2237       MulP(dst, src2);
2238     }
2239   }
2240 }
2241 
2242 void TurboAssembler::DivP(Register dividend, Register divider) {
2243   // The dividend must be the even register of an even/odd register pair.
2244   DCHECK_EQ(dividend.code() % 2, 0);
2245 #if V8_TARGET_ARCH_S390X
2246   dsgr(dividend, divider);
2247 #else
2248   dr(dividend, divider);
2249 #endif
2250 }
2251 
2252 #define Generate_Div32(instr) \
2253   {                           \
2254     lgfr(r1, src1);           \
2255     instr(r0, src2);          \
2256     LoadlW(dst, r1);          \
2257   }
2258 
2259 void TurboAssembler::Div32(Register dst, Register src1,
2260                            const MemOperand& src2) {
2261   Generate_Div32(dsgf);
2262 }
2263 
2264 void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
2265   Generate_Div32(dsgfr);
2266 }
2267 
2268 #undef Generate_Div32
2269 
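// Unsigned 32-bit division builds the dividend in the r0:r1 pair: lr places
// src1 in r0 and srdl shifts it down into r1 (zeroing r0). dl/dlr then leave
// the quotient in r1 and the remainder in r0.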
2270 #define Generate_DivU32(instr) \
2271   {                            \
2272     lr(r0, src1);              \
2273     srdl(r0, Operand(32));     \
2274     instr(r0, src2);           \
2275     LoadlW(dst, r1);           \
2276   }
2277 
2278 void TurboAssembler::DivU32(Register dst, Register src1,
2279                             const MemOperand& src2) {
2280   Generate_DivU32(dl);
2281 }
2282 
2283 void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
2284   Generate_DivU32(dlr);
2285 }
2286 
2287 #undef Generate_DivU32
2288 
2289 #define Generate_Div64(instr) \
2290   {                           \
2291     lgr(r1, src1);            \
2292     instr(r0, src2);          \
2293     lgr(dst, r1);             \
2294   }
2295 
2296 void TurboAssembler::Div64(Register dst, Register src1,
2297                            const MemOperand& src2) {
2298   Generate_Div64(dsg);
2299 }
2300 
2301 void TurboAssembler::Div64(Register dst, Register src1, Register src2) {
2302   Generate_Div64(dsgr);
2303 }
2304 
2305 #undef Generate_Div64
2306 
2307 #define Generate_DivU64(instr) \
2308   {                            \
2309     lgr(r1, src1);             \
2310     lghi(r0, Operand::Zero()); \
2311     instr(r0, src2);           \
2312     lgr(dst, r1);              \
2313   }
2314 
2315 void TurboAssembler::DivU64(Register dst, Register src1,
2316                             const MemOperand& src2) {
2317   Generate_DivU64(dlg);
2318 }
2319 
2320 void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
2321   Generate_DivU64(dlgr);
2322 }
2323 
2324 #undef Generate_DivU64
2325 
2326 #define Generate_Mod32(instr) \
2327   {                           \
2328     lgfr(r1, src1);           \
2329     instr(r0, src2);          \
2330     LoadlW(dst, r0);          \
2331   }
2332 
2333 void TurboAssembler::Mod32(Register dst, Register src1,
2334                            const MemOperand& src2) {
2335   Generate_Mod32(dsgf);
2336 }
2337 
2338 void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
2339   Generate_Mod32(dsgfr);
2340 }
2341 
2342 #undef Generate_Mod32
2343 
2344 #define Generate_ModU32(instr) \
2345   {                            \
2346     lr(r0, src1);              \
2347     srdl(r0, Operand(32));     \
2348     instr(r0, src2);           \
2349     LoadlW(dst, r0);           \
2350   }
2351 
2352 void TurboAssembler::ModU32(Register dst, Register src1,
2353                             const MemOperand& src2) {
2354   Generate_ModU32(dl);
2355 }
2356 
2357 void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
2358   Generate_ModU32(dlr);
2359 }
2360 
2361 #undef Generate_ModU32
2362 
2363 #define Generate_Mod64(instr) \
2364   {                           \
2365     lgr(r1, src1);            \
2366     instr(r0, src2);          \
2367     lgr(dst, r0);             \
2368   }
2369 
2370 void TurboAssembler::Mod64(Register dst, Register src1,
2371                            const MemOperand& src2) {
2372   Generate_Mod64(dsg);
2373 }
2374 
2375 void TurboAssembler::Mod64(Register dst, Register src1, Register src2) {
2376   Generate_Mod64(dsgr);
2377 }
2378 
2379 #undef Generate_Mod64
2380 
2381 #define Generate_ModU64(instr) \
2382   {                            \
2383     lgr(r1, src1);             \
2384     lghi(r0, Operand::Zero()); \
2385     instr(r0, src2);           \
2386     lgr(dst, r0);              \
2387   }
2388 
2389 void TurboAssembler::ModU64(Register dst, Register src1,
2390                             const MemOperand& src2) {
2391   Generate_ModU64(dlg);
2392 }
2393 
2394 void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
2395   Generate_ModU64(dlgr);
2396 }
2397 
2398 #undef Generate_ModU64
2399 
2400 void TurboAssembler::MulP(Register dst, const Operand& opnd) {
2401 #if V8_TARGET_ARCH_S390X
2402   msgfi(dst, opnd);
2403 #else
2404   msfi(dst, opnd);
2405 #endif
2406 }
2407 
2408 void TurboAssembler::MulP(Register dst, Register src) {
2409 #if V8_TARGET_ARCH_S390X
2410   msgr(dst, src);
2411 #else
2412   msr(dst, src);
2413 #endif
2414 }
2415 
2416 void TurboAssembler::MulPWithCondition(Register dst, Register src1,
2417                                        Register src2) {
2418   CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
2419 #if V8_TARGET_ARCH_S390X
2420   msgrkc(dst, src1, src2);
2421 #else
2422   msrkc(dst, src1, src2);
2423 #endif
2424 }
2425 
2426 void TurboAssembler::MulP(Register dst, const MemOperand& opnd) {
2427 #if V8_TARGET_ARCH_S390X
2428   if (is_int20(opnd.offset())) {
2429     msg(dst, opnd);
2430   } else {
2431     UNIMPLEMENTED();
2432   }
2433 #else
2434   if (is_uint12(opnd.offset())) {
2435     ms(dst, opnd);
2436   } else if (is_int20(opnd.offset())) {
2437     msy(dst, opnd);
2438   } else {
2439     UNIMPLEMENTED();
2440   }
2441 #endif
2442 }
2443 
2444 void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
2445   sqdbr(result, input);
2446 }
2447 void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
2448   if (is_uint12(input.offset())) {
2449     sqdb(result, input);
2450   } else {
2451     ldy(result, input);
2452     sqdbr(result, result);
2453   }
2454 }
2455 //----------------------------------------------------------------------------
2456 //  Add Instructions
2457 //----------------------------------------------------------------------------
2458 
2459 // Add 32-bit (Register dst = Register dst + Immediate opnd)
2460 void TurboAssembler::Add32(Register dst, const Operand& opnd) {
2461   if (is_int16(opnd.immediate()))
2462     ahi(dst, opnd);
2463   else
2464     afi(dst, opnd);
2465 }
2466 
2467 // Add 32-bit (Register dst = Register dst + Immediate opnd)
2468 void TurboAssembler::Add32_RI(Register dst, const Operand& opnd) {
2469   // Just a wrapper for above
2470   Add32(dst, opnd);
2471 }
2472 
2473 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
2474 void TurboAssembler::AddP(Register dst, const Operand& opnd) {
2475 #if V8_TARGET_ARCH_S390X
2476   if (is_int16(opnd.immediate()))
2477     aghi(dst, opnd);
2478   else
2479     agfi(dst, opnd);
2480 #else
2481   Add32(dst, opnd);
2482 #endif
2483 }
2484 
2485 // Add 32-bit (Register dst = Register src + Immediate opnd)
2486 void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
2487   if (dst != src) {
2488     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2489       ahik(dst, src, opnd);
2490       return;
2491     }
2492     lr(dst, src);
2493   }
2494   Add32(dst, opnd);
2495 }
2496 
2497 // Add 32-bit (Register dst = Register src + Immediate opnd)
2498 void TurboAssembler::Add32_RRI(Register dst, Register src,
2499                                const Operand& opnd) {
2500   // Just a wrapper for above
2501   Add32(dst, src, opnd);
2502 }
2503 
2504 // Add Pointer Size (Register dst = Register src + Immediate opnd)
2505 void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
2506   if (dst != src) {
2507     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2508       AddPImm_RRI(dst, src, opnd);
2509       return;
2510     }
2511     LoadRR(dst, src);
2512   }
2513   AddP(dst, opnd);
2514 }
2515 
2516 // Add 32-bit (Register dst = Register dst + Register src)
2517 void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
2518 
2519 // Add Pointer Size (Register dst = Register dst + Register src)
2520 void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
2521 
2522 // Add Pointer Size with src extension
2523 //     (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
2524 // src is treated as a 32-bit signed integer, which is sign extended to
2525 // 64-bit if necessary.
2526 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
2527 #if V8_TARGET_ARCH_S390X
2528   agfr(dst, src);
2529 #else
2530   ar(dst, src);
2531 #endif
2532 }
2533 
2534 // Add 32-bit (Register dst = Register src1 + Register src2)
2535 void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
2536   if (dst != src1 && dst != src2) {
2537     // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
2538     // as AR is the smaller instruction.
2539     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2540       ark(dst, src1, src2);
2541       return;
2542     } else {
2543       lr(dst, src1);
2544     }
2545   } else if (dst == src2) {
2546     src2 = src1;
2547   }
2548   ar(dst, src2);
2549 }
2550 
2551 // Add Pointer Size (Register dst = Register src1 + Register src2)
2552 void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
2553   if (dst != src1 && dst != src2) {
2554     // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
2555     // as AR is the smaller instruction.
2556     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2557       AddP_RRR(dst, src1, src2);
2558       return;
2559     } else {
2560       LoadRR(dst, src1);
2561     }
2562   } else if (dst == src2) {
2563     src2 = src1;
2564   }
2565   AddRR(dst, src2);
2566 }
2567 
2568 // Add Pointer Size with src extension
2569 //      (Register dst (ptr) = Register src1 (ptr) +
2570 //                            Register src2 (32 | 32->64))
2571 // src is treated as a 32-bit signed integer, which is sign extended to
2572 // 64-bit if necessary.
2573 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
2574                                     Register src2) {
2575 #if V8_TARGET_ARCH_S390X
2576   if (dst == src2) {
2577     // The source we need to sign extend is the same as result.
2578     lgfr(dst, src2);
2579     agr(dst, src1);
2580   } else {
2581     if (dst != src1) LoadRR(dst, src1);
2582     agfr(dst, src2);
2583   }
2584 #else
2585   AddP(dst, src1, src2);
2586 #endif
2587 }
2588 
2589 // Add 32-bit (Register-Memory)
2590 void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
2591   DCHECK(is_int20(opnd.offset()));
2592   if (is_uint12(opnd.offset()))
2593     a(dst, opnd);
2594   else
2595     ay(dst, opnd);
2596 }
2597 
2598 // Add Pointer Size (Register-Memory)
2599 void TurboAssembler::AddP(Register dst, const MemOperand& opnd) {
2600 #if V8_TARGET_ARCH_S390X
2601   DCHECK(is_int20(opnd.offset()));
2602   ag(dst, opnd);
2603 #else
2604   Add32(dst, opnd);
2605 #endif
2606 }
2607 
2608 // Add Pointer Size with src extension
2609 //      (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
2610 // src is treated as a 32-bit signed integer, which is sign extended to
2611 // 64-bit if necessary.
2612 void TurboAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
2613 #if V8_TARGET_ARCH_S390X
2614   DCHECK(is_int20(opnd.offset()));
2615   agf(dst, opnd);
2616 #else
2617   Add32(dst, opnd);
2618 #endif
2619 }
2620 
2621 // Add 32-bit (Memory - Immediate)
2622 void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
2623   DCHECK(is_int8(imm.immediate()));
2624   DCHECK(is_int20(opnd.offset()));
2625   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2626   asi(opnd, imm);
2627 }
2628 
2629 // Add Pointer-sized (Memory - Immediate)
2630 void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
2631   DCHECK(is_int8(imm.immediate()));
2632   DCHECK(is_int20(opnd.offset()));
2633   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2634 #if V8_TARGET_ARCH_S390X
2635   agsi(opnd, imm);
2636 #else
2637   asi(opnd, imm);
2638 #endif
2639 }
2640 
2641 //----------------------------------------------------------------------------
2642 //  Add Logical Instructions
2643 //----------------------------------------------------------------------------
2644 
2645 // Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
2646 void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
2647                                            Register src2) {
2648   if (dst != src2 && dst != src1) {
2649     lr(dst, src1);
2650     alcr(dst, src2);
2651   } else if (dst != src2) {
2652     // dst == src1
2653     DCHECK(dst == src1);
2654     alcr(dst, src2);
2655   } else {
2656     // dst == src2
2657     DCHECK(dst == src2);
2658     alcr(dst, src1);
2659   }
2660 }
2661 
2662 // Add Logical 32-bit (Register dst = Register src1 + Register src2)
2663 void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
2664   if (dst != src2 && dst != src1) {
2665     lr(dst, src1);
2666     alr(dst, src2);
2667   } else if (dst != src2) {
2668     // dst == src1
2669     DCHECK(dst == src1);
2670     alr(dst, src2);
2671   } else {
2672     // dst == src2
2673     DCHECK(dst == src2);
2674     alr(dst, src1);
2675   }
2676 }
2677 
2678 // Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
2679 void TurboAssembler::AddLogical(Register dst, const Operand& imm) {
2680   alfi(dst, imm);
2681 }
2682 
2683 // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
2684 void TurboAssembler::AddLogicalP(Register dst, const Operand& imm) {
2685 #ifdef V8_TARGET_ARCH_S390X
2686   algfi(dst, imm);
2687 #else
2688   AddLogical(dst, imm);
2689 #endif
2690 }
2691 
2692 // Add Logical 32-bit (Register-Memory)
2693 void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
2694   DCHECK(is_int20(opnd.offset()));
2695   if (is_uint12(opnd.offset()))
2696     al_z(dst, opnd);
2697   else
2698     aly(dst, opnd);
2699 }
2700 
2701 // Add Logical Pointer Size (Register-Memory)
2702 void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
2703 #if V8_TARGET_ARCH_S390X
2704   DCHECK(is_int20(opnd.offset()));
2705   alg(dst, opnd);
2706 #else
2707   AddLogical(dst, opnd);
2708 #endif
2709 }
2710 
2711 //----------------------------------------------------------------------------
2712 //  Subtract Instructions
2713 //----------------------------------------------------------------------------
2714 
2715 // Subtract Logical With Borrow 32-bit (Register dst = Register src1 -
2716 // Register src2)
2717 void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
2718                                             Register src2) {
2719   if (dst != src2 && dst != src1) {
2720     lr(dst, src1);
2721     slbr(dst, src2);
2722   } else if (dst != src2) {
2723     // dst == src1
2724     DCHECK(dst == src1);
2725     slbr(dst, src2);
2726   } else {
2727     // dst == src2
2728     DCHECK(dst == src2);
2729     lr(r0, dst);
2730     SubLogicalWithBorrow32(dst, src1, r0);
2731   }
2732 }
2733 
2734 // Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
2735 void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
2736   if (dst != src2 && dst != src1) {
2737     lr(dst, src1);
2738     slr(dst, src2);
2739   } else if (dst != src2) {
2740     // dst == src1
2741     DCHECK(dst == src1);
2742     slr(dst, src2);
2743   } else {
2744     // dst == src2
2745     DCHECK(dst == src2);
2746     lr(r0, dst);
2747     SubLogical32(dst, src1, r0);
2748   }
2749 }
2750 
2751 // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
2752 void TurboAssembler::Sub32(Register dst, const Operand& imm) {
2753   Add32(dst, Operand(-(imm.immediate())));
2754 }
2755 
2756 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
2757 void TurboAssembler::SubP(Register dst, const Operand& imm) {
2758   AddP(dst, Operand(-(imm.immediate())));
2759 }
2760 
2761 // Subtract 32-bit (Register dst = Register src - Immediate opnd)
2762 void TurboAssembler::Sub32(Register dst, Register src, const Operand& imm) {
2763   Add32(dst, src, Operand(-(imm.immediate())));
2764 }
2765 
2766 // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
2767 void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
2768   AddP(dst, src, Operand(-(imm.immediate())));
2769 }
2770 
2771 // Subtract 32-bit (Register dst = Register dst - Register src)
2772 void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
2773 
2774 // Subtract Pointer Size (Register dst = Register dst - Register src)
2775 void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
2776 
2777 // Subtract Pointer Size with src extension
2778 //     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
2779 // src is treated as a 32-bit signed integer, which is sign extended to
2780 // 64-bit if necessary.
2781 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src) {
2782 #if V8_TARGET_ARCH_S390X
2783   sgfr(dst, src);
2784 #else
2785   sr(dst, src);
2786 #endif
2787 }
2788 
2789 // Subtract 32-bit (Register = Register - Register)
2790 void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
2791   // Use non-clobbering version if possible
2792   if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2793     srk(dst, src1, src2);
2794     return;
2795   }
2796   if (dst != src1 && dst != src2) lr(dst, src1);
2797   // In the scenario where we have dst = src - dst, we need to swap and negate
2798   if (dst != src1 && dst == src2) {
2799     Label done;
2800     lcr(dst, dst);  // dst = -dst
2801     b(overflow, &done);
2802     ar(dst, src1);  // dst = dst + src
2803     bind(&done);
2804   } else {
2805     sr(dst, src2);
2806   }
2807 }
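
// Example (illustrative, hypothetical registers): without DISTINCT_OPS and
// with dst aliasing src2, Sub32(r2, r3, r2) takes the negate-and-add path:
//   lcr r2, r2   // r2 = -r2 (the overflow branch covers the kMinInt case)
//   ar  r2, r3   // r2 = r3 - original r2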
2808 
2809 // Subtract Pointer Sized (Register = Register - Register)
2810 void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
2811   // Use non-clobbering version if possible
2812   if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2813     SubP_RRR(dst, src1, src2);
2814     return;
2815   }
2816   if (dst != src1 && dst != src2) LoadRR(dst, src1);
2817   // In the scenario where we have dst = src - dst, we need to swap and negate
2818   if (dst != src1 && dst == src2) {
2819     Label done;
2820     LoadComplementRR(dst, dst);  // dst = -dst
2821     b(overflow, &done);
2822     AddP(dst, src1);  // dst = dst + src
2823     bind(&done);
2824   } else {
2825     SubP(dst, src2);
2826   }
2827 }
2828 
2829 // Subtract Pointer Size with src extension
2830 //     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
2831 // src is treated as a 32-bit signed integer, which is sign extended to
2832 // 64-bit if necessary.
2833 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
2834                                     Register src2) {
2835 #if V8_TARGET_ARCH_S390X
2836   if (dst != src1 && dst != src2) LoadRR(dst, src1);
2837 
2838   // In the scenario where we have dst = src - dst, we need to swap and negate
2839   if (dst != src1 && dst == src2) {
2840     lgfr(dst, dst);              // Sign extend this operand first.
2841     LoadComplementRR(dst, dst);  // dst = -dst
2842     AddP(dst, src1);             // dst = -dst + src
2843   } else {
2844     sgfr(dst, src2);
2845   }
2846 #else
2847   SubP(dst, src1, src2);
2848 #endif
2849 }
2850 
2851 // Subtract 32-bit (Register-Memory)
2852 void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
2853   DCHECK(is_int20(opnd.offset()));
2854   if (is_uint12(opnd.offset()))
2855     s(dst, opnd);
2856   else
2857     sy(dst, opnd);
2858 }
2859 
2860 // Subtract Pointer Sized (Register - Memory)
2861 void TurboAssembler::SubP(Register dst, const MemOperand& opnd) {
2862 #if V8_TARGET_ARCH_S390X
2863   sg(dst, opnd);
2864 #else
2865   Sub32(dst, opnd);
2866 #endif
2867 }
2868 
2869 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2870   sllg(r0, src, Operand(32));
2871   ldgr(dst, r0);
2872 }
2873 
2874 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2875   lgdr(dst, src);
2876   srlg(dst, dst, Operand(32));
2877 }
2878 
2879 void TurboAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
2880 #if V8_TARGET_ARCH_S390X
2881   DCHECK(is_int20(opnd.offset()));
2882   sgf(dst, opnd);
2883 #else
2884   Sub32(dst, opnd);
2885 #endif
2886 }
2887 
2888 // Load And Subtract 32-bit (similar to laa/lan/lao/lax)
2889 void TurboAssembler::LoadAndSub32(Register dst, Register src,
2890                                   const MemOperand& opnd) {
2891   lcr(dst, src);
2892   laa(dst, dst, opnd);
2893 }
2894 
2895 void TurboAssembler::LoadAndSub64(Register dst, Register src,
2896                                   const MemOperand& opnd) {
2897   lcgr(dst, src);
2898   laag(dst, dst, opnd);
2899 }
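
// Sketch of the idiom above (illustrative): LAA/LAAG atomically performs
//   temp = [opnd]; [opnd] = temp + R3; R1 = temp
// so loading dst with -src first turns it into "fetch old value and
// subtract":
//   lcr dst, src        // dst = -src
//   laa dst, dst, opnd  // dst = old [opnd]; [opnd] -= src, atomically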
2900 
2901 //----------------------------------------------------------------------------
2902 //  Subtract Logical Instructions
2903 //----------------------------------------------------------------------------
2904 
2905 // Subtract Logical 32-bit (Register - Memory)
2906 void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
2907   DCHECK(is_int20(opnd.offset()));
2908   if (is_uint12(opnd.offset()))
2909     sl(dst, opnd);
2910   else
2911     sly(dst, opnd);
2912 }
2913 
2914 // Subtract Logical Pointer Sized (Register - Memory)
2915 void TurboAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
2916   DCHECK(is_int20(opnd.offset()));
2917 #if V8_TARGET_ARCH_S390X
2918   slgf(dst, opnd);
2919 #else
2920   SubLogical(dst, opnd);
2921 #endif
2922 }
2923 
2924 // Subtract Logical Pointer Size with src extension
2925 //      (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
2926 // src is treated as a 32-bit signed integer, which is sign extended to
2927 // 64-bit if necessary.
2928 void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
2929                                            const MemOperand& opnd) {
2930 #if V8_TARGET_ARCH_S390X
2931   DCHECK(is_int20(opnd.offset()));
2932   slgf(dst, opnd);
2933 #else
2934   SubLogical(dst, opnd);
2935 #endif
2936 }
2937 
2938 //----------------------------------------------------------------------------
2939 //  Bitwise Operations
2940 //----------------------------------------------------------------------------
2941 
2942 // AND 32-bit - dst = dst & src
2943 void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
2944 
2945 // AND Pointer Size - dst = dst & src
2946 void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
2947 
2948 // Non-clobbering AND 32-bit - dst = src1 & src2
2949 void TurboAssembler::And(Register dst, Register src1, Register src2) {
2950   if (dst != src1 && dst != src2) {
2951     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
2952     // as NR is a smaller instruction
2953     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2954       nrk(dst, src1, src2);
2955       return;
2956     } else {
2957       lr(dst, src1);
2958     }
2959   } else if (dst == src2) {
2960     src2 = src1;
2961   }
2962   And(dst, src2);
2963 }
2964 
2965 // Non-clobbering AND pointer size - dst = src1 & src2
2966 void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
2967   if (dst != src1 && dst != src2) {
2968     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
2969     // as NR is a smaller instruction
2970     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2971       AndP_RRR(dst, src1, src2);
2972       return;
2973     } else {
2974       LoadRR(dst, src1);
2975     }
2976   } else if (dst == src2) {
2977     src2 = src1;
2978   }
2979   AndP(dst, src2);
2980 }
2981 
2982 // AND 32-bit (Reg - Mem)
2983 void TurboAssembler::And(Register dst, const MemOperand& opnd) {
2984   DCHECK(is_int20(opnd.offset()));
2985   if (is_uint12(opnd.offset()))
2986     n(dst, opnd);
2987   else
2988     ny(dst, opnd);
2989 }
2990 
2991 // AND Pointer Size (Reg - Mem)
2992 void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
2993   DCHECK(is_int20(opnd.offset()));
2994 #if V8_TARGET_ARCH_S390X
2995   ng(dst, opnd);
2996 #else
2997   And(dst, opnd);
2998 #endif
2999 }
3000 
3001 // AND 32-bit - dst = dst & imm
3002 void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
3003 
3004 // AND Pointer Size - dst = dst & imm
3005 void TurboAssembler::AndP(Register dst, const Operand& opnd) {
3006 #if V8_TARGET_ARCH_S390X
3007   intptr_t value = opnd.immediate();
3008   if (value >> 32 != -1) {
3009     // This may not work because the condition code won't be set correctly.
3010     nihf(dst, Operand(value >> 32));
3011   }
3012   nilf(dst, Operand(value & 0xFFFFFFFF));
3013 #else
3014   And(dst, opnd);
3015 #endif
3016 }
3017 
3018 // AND 32-bit - dst = src & imm
3019 void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
3020   if (dst != src) lr(dst, src);
3021   nilf(dst, opnd);
3022 }
3023 
3024 // AND Pointer Size - dst = src & imm
3025 void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
3026   // Try to exploit RISBG first
3027   intptr_t value = opnd.immediate();
3028   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
3029     intptr_t shifted_value = value;
3030     int trailing_zeros = 0;
3031 
3032     // We start checking how many trailing zeros are left at the end.
3033     while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
3034       trailing_zeros++;
3035       shifted_value >>= 1;
3036     }
3037 
3038     // If shifted_value (the value with the right-most set of zeros shifted
3039     // out) is 1 less than a power of 2, we have consecutive bits of 1.
3040     // Special case: If shifted_value is zero, we cannot use RISBG, as it
3041     //               requires selection of at least 1 bit.
3042     if ((0 != shifted_value) && base::bits::IsPowerOfTwo(shifted_value + 1)) {
3043       int startBit =
3044           base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
3045       int endBit = 63 - trailing_zeros;
3046       // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
3047       RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
3048                              Operand::Zero(), true);
3049       return;
3050     } else if (-1 == shifted_value) {
3051       // A special case in which all top bits up to the MSB are 1s.  In this
3052       // case, we can set startBit to 0.
3053       int endBit = 63 - trailing_zeros;
3054       RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
3055                              Operand::Zero(), true);
3056       return;
3057     }
3058   }
3059 
3060   // If we are AND'ing with zero, dst will be zeroed anyway, so skip the copy.
3061   if (dst != src && (0 != value)) LoadRR(dst, src);
3062   AndP(dst, opnd);
3063 }
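
// Worked example (illustrative): for imm = 0xFF0, trailing_zeros becomes 4
// and shifted_value becomes 0xFF (one less than a power of 2, i.e. a run of
// consecutive ones), so startBit = CountLeadingZeros64(0xFF) - 4 = 52 and
// endBit = 63 - 4 = 59. RISBG then selects bits 52..59 (IBM bit numbering,
// bit 0 = MSB) and zeroes the rest: dst = src & 0xFF0 in a single
// instruction, with no copy and no NILF.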
3064 
3065 // OR 32-bit - dst = dst | src
3066 void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
3067 
3068 // OR Pointer Size - dst = dst | src
3069 void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
3070 
3071 // Non-clobbering OR 32-bit - dst = src1 | src2
3072 void TurboAssembler::Or(Register dst, Register src1, Register src2) {
3073   if (dst != src1 && dst != src2) {
3074     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
3075     // as OR is a smaller instruction
3076     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3077       ork(dst, src1, src2);
3078       return;
3079     } else {
3080       lr(dst, src1);
3081     }
3082   } else if (dst == src2) {
3083     src2 = src1;
3084   }
3085   Or(dst, src2);
3086 }
3087 
3088 // Non-clobbering OR pointer size - dst = src1 | src2
3089 void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
3090   if (dst != src1 && dst != src2) {
3091     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
3092     // as OR is a smaller instruction
3093     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3094       OrP_RRR(dst, src1, src2);
3095       return;
3096     } else {
3097       LoadRR(dst, src1);
3098     }
3099   } else if (dst == src2) {
3100     src2 = src1;
3101   }
3102   OrP(dst, src2);
3103 }
3104 
3105 // OR 32-bit (Reg - Mem)
3106 void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
3107   DCHECK(is_int20(opnd.offset()));
3108   if (is_uint12(opnd.offset()))
3109     o(dst, opnd);
3110   else
3111     oy(dst, opnd);
3112 }
3113 
3114 // OR Pointer Size (Reg - Mem)
3115 void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
3116   DCHECK(is_int20(opnd.offset()));
3117 #if V8_TARGET_ARCH_S390X
3118   og(dst, opnd);
3119 #else
3120   Or(dst, opnd);
3121 #endif
3122 }
3123 
3124 // OR 32-bit - dst = dst | imm
3125 void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
3126 
3127 // OR Pointer Size - dst = dst | imm
3128 void TurboAssembler::OrP(Register dst, const Operand& opnd) {
3129 #if V8_TARGET_ARCH_S390X
3130   intptr_t value = opnd.immediate();
3131   if (value >> 32 != 0) {
3132     // This may not work because the condition code won't be set correctly.
3133     oihf(dst, Operand(value >> 32));
3134   }
3135   oilf(dst, Operand(value & 0xFFFFFFFF));
3136 #else
3137   Or(dst, opnd);
3138 #endif
3139 }
3140 
3141 // OR 32-bit - dst = src | imm
3142 void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
3143   if (dst != src) lr(dst, src);
3144   oilf(dst, opnd);
3145 }
3146 
3147 // OR Pointer Size - dst = src | imm
3148 void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
3149   if (dst != src) LoadRR(dst, src);
3150   OrP(dst, opnd);
3151 }
3152 
3153 // XOR 32-bit - dst = dst ^ src
3154 void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
3155 
3156 // XOR Pointer Size - dst = dst ^ src
3157 void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
3158 
3159 // Non-clobbering XOR 32-bit - dst = src1 ^ src2
3160 void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
3161   if (dst != src1 && dst != src2) {
3162     // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
3163     // as XR is a smaller instruction
3164     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3165       xrk(dst, src1, src2);
3166       return;
3167     } else {
3168       lr(dst, src1);
3169     }
3170   } else if (dst == src2) {
3171     src2 = src1;
3172   }
3173   Xor(dst, src2);
3174 }
3175 
3176 // Non-clobbering XOR pointer size - dst = src1 ^ src2
3177 void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
3178   if (dst != src1 && dst != src2) {
3179     // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
3180     // as XR is a smaller instruction
3181     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3182       XorP_RRR(dst, src1, src2);
3183       return;
3184     } else {
3185       LoadRR(dst, src1);
3186     }
3187   } else if (dst == src2) {
3188     src2 = src1;
3189   }
3190   XorP(dst, src2);
3191 }
3192 
3193 // XOR 32-bit (Reg - Mem)
3194 void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
3195   DCHECK(is_int20(opnd.offset()));
3196   if (is_uint12(opnd.offset()))
3197     x(dst, opnd);
3198   else
3199     xy(dst, opnd);
3200 }
3201 
3202 // XOR Pointer Size (Reg - Mem)
3203 void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
3204   DCHECK(is_int20(opnd.offset()));
3205 #if V8_TARGET_ARCH_S390X
3206   xg(dst, opnd);
3207 #else
3208   Xor(dst, opnd);
3209 #endif
3210 }
3211 
3212 // XOR 32-bit - dst = dst ^ imm
3213 void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
3214 
3215 // XOR Pointer Size - dst = dst ^ imm
3216 void TurboAssembler::XorP(Register dst, const Operand& opnd) {
3217 #if V8_TARGET_ARCH_S390X
3218   intptr_t value = opnd.immediate();
3219   xihf(dst, Operand(value >> 32));
3220   xilf(dst, Operand(value & 0xFFFFFFFF));
3221 #else
3222   Xor(dst, opnd);
3223 #endif
3224 }
3225 
3226 // XOR 32-bit - dst = src ^ imm
3227 void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
3228   if (dst != src) lr(dst, src);
3229   xilf(dst, opnd);
3230 }
3231 
3232 // XOR Pointer Size - dst = src ^ imm
3233 void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
3234   if (dst != src) LoadRR(dst, src);
3235   XorP(dst, opnd);
3236 }
3237 
3238 void TurboAssembler::Not32(Register dst, Register src) {
3239   if (src != no_reg && src != dst) lr(dst, src);
3240   xilf(dst, Operand(0xFFFFFFFF));
3241 }
3242 
3243 void TurboAssembler::Not64(Register dst, Register src) {
3244   if (src != no_reg && src != dst) lgr(dst, src);
3245   xihf(dst, Operand(0xFFFFFFFF));
3246   xilf(dst, Operand(0xFFFFFFFF));
3247 }
3248 
3249 void TurboAssembler::NotP(Register dst, Register src) {
3250 #if V8_TARGET_ARCH_S390X
3251   Not64(dst, src);
3252 #else
3253   Not32(dst, src);
3254 #endif
3255 }
3256 
3257 // Works the same as mov.
3258 void TurboAssembler::Load(Register dst, const Operand& opnd) {
3259   intptr_t value = opnd.immediate();
3260   if (is_int16(value)) {
3261 #if V8_TARGET_ARCH_S390X
3262     lghi(dst, opnd);
3263 #else
3264     lhi(dst, opnd);
3265 #endif
3266   } else if (is_int32(value)) {
3267 #if V8_TARGET_ARCH_S390X
3268     lgfi(dst, opnd);
3269 #else
3270     iilf(dst, opnd);
3271 #endif
3272   } else if (is_uint32(value)) {
3273 #if V8_TARGET_ARCH_S390X
3274     llilf(dst, opnd);
3275 #else
3276     iilf(dst, opnd);
3277 #endif
3278   } else {
3279     int32_t hi_32 = static_cast<int64_t>(value) >> 32;
3280     int32_t lo_32 = static_cast<int32_t>(value);
3281 
3282     iihf(dst, Operand(hi_32));
3283     iilf(dst, Operand(lo_32));
3284   }
3285 }
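
// Selection sketch (illustrative values; 64-bit target assumed):
//   Load(r1, Operand(-42));          // is_int16  -> lghi
//   Load(r1, Operand(0x12345));      // is_int32  -> lgfi
//   Load(r1, Operand(0x80000000));   // is_uint32 -> llilf (zero-extended)
//   Load(r1, Operand(0x123456789));  // otherwise -> iihf + iilf halves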
3286 
3287 void TurboAssembler::Load(Register dst, const MemOperand& opnd) {
3288   DCHECK(is_int20(opnd.offset()));
3289 #if V8_TARGET_ARCH_S390X
3290   lgf(dst, opnd);  // 64<-32
3291 #else
3292   if (is_uint12(opnd.offset())) {
3293     l(dst, opnd);
3294   } else {
3295     ly(dst, opnd);
3296   }
3297 #endif
3298 }
3299 
3300 void TurboAssembler::LoadPositiveP(Register result, Register input) {
3301 #if V8_TARGET_ARCH_S390X
3302   lpgr(result, input);
3303 #else
3304   lpr(result, input);
3305 #endif
3306 }
3307 
3308 void TurboAssembler::LoadPositive32(Register result, Register input) {
3309   lpr(result, input);
3310   lgfr(result, result);
3311 }
3312 
3313 //-----------------------------------------------------------------------------
3314 //  Compare Helpers
3315 //-----------------------------------------------------------------------------
3316 
3317 // Compare 32-bit Register vs Register
3318 void TurboAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
3319 
3320 // Compare Pointer Sized Register vs Register
3321 void TurboAssembler::CmpP(Register src1, Register src2) {
3322 #if V8_TARGET_ARCH_S390X
3323   cgr(src1, src2);
3324 #else
3325   Cmp32(src1, src2);
3326 #endif
3327 }
3328 
3329 // Compare 32-bit Register vs Immediate
3330 // This helper will set up proper relocation entries if required.
3331 void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
3332   if (opnd.rmode() == RelocInfo::NONE) {
3333     intptr_t value = opnd.immediate();
3334     if (is_int16(value))
3335       chi(dst, opnd);
3336     else
3337       cfi(dst, opnd);
3338   } else {
3339     // Need to generate relocation record here
3340     RecordRelocInfo(opnd.rmode(), opnd.immediate());
3341     cfi(dst, opnd);
3342   }
3343 }
3344 
3345 // Compare Pointer Sized  Register vs Immediate
3346 // This helper will set up proper relocation entries if required.
3347 void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
3348 #if V8_TARGET_ARCH_S390X
3349   if (opnd.rmode() == RelocInfo::NONE) {
3350     cgfi(dst, opnd);
3351   } else {
3352     mov(r0, opnd);  // Need to generate 64-bit relocation
3353     cgr(dst, r0);
3354   }
3355 #else
3356   Cmp32(dst, opnd);
3357 #endif
3358 }
3359 
3360 // Compare 32-bit Register vs Memory
3361 void TurboAssembler::Cmp32(Register dst, const MemOperand& opnd) {
3362   // make sure offset is within 20 bit range
3363   DCHECK(is_int20(opnd.offset()));
3364   if (is_uint12(opnd.offset()))
3365     c(dst, opnd);
3366   else
3367     cy(dst, opnd);
3368 }
3369 
3370 // Compare Pointer Size Register vs Memory
3371 void TurboAssembler::CmpP(Register dst, const MemOperand& opnd) {
3372   // make sure offset is within 20 bit range
3373   DCHECK(is_int20(opnd.offset()));
3374 #if V8_TARGET_ARCH_S390X
3375   cg(dst, opnd);
3376 #else
3377   Cmp32(dst, opnd);
3378 #endif
3379 }
3380 
3381 // Using cs or csy based on the offset
3382 void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
3383                                 const MemOperand& opnd) {
3384   if (is_uint12(opnd.offset())) {
3385     cs(old_val, new_val, opnd);
3386   } else {
3387     csy(old_val, new_val, opnd);
3388   }
3389 }
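
// Typical use (illustrative sketch; hypothetical registers and label): a
// compare-and-swap retry loop for a 32-bit atomic read-modify-write.
//   Label retry;
//   bind(&retry);
//   LoadlW(old_val, opnd);               // observe the current value
//   // ... compute new_val from old_val ...
//   CmpAndSwap(old_val, new_val, opnd);  // CC == 0 iff the store happened
//   b(ne, &retry);                       // lost the race; try again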
3390 
3391 void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
3392                                   const MemOperand& opnd) {
3393   DCHECK(is_int20(opnd.offset()));
3394   csg(old_val, new_val, opnd);
3395 }
3396 
3397 //-----------------------------------------------------------------------------
3398 // Compare Logical Helpers
3399 //-----------------------------------------------------------------------------
3400 
3401 // Compare Logical 32-bit Register vs Register
3402 void TurboAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
3403 
3404 // Compare Logical Pointer Sized Register vs Register
3405 void TurboAssembler::CmpLogicalP(Register dst, Register src) {
3406 #ifdef V8_TARGET_ARCH_S390X
3407   clgr(dst, src);
3408 #else
3409   CmpLogical32(dst, src);
3410 #endif
3411 }
3412 
3413 // Compare Logical 32-bit Register vs Immediate
3414 void TurboAssembler::CmpLogical32(Register dst, const Operand& opnd) {
3415   clfi(dst, opnd);
3416 }
3417 
3418 // Compare Logical Pointer Sized Register vs Immediate
3419 void TurboAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
3420 #if V8_TARGET_ARCH_S390X
3421   DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
3422   clgfi(dst, opnd);
3423 #else
3424   CmpLogical32(dst, opnd);
3425 #endif
3426 }
3427 
3428 // Compare Logical 32-bit Register vs Memory
3429 void TurboAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
3430   // make sure offset is within 20 bit range
3431   DCHECK(is_int20(opnd.offset()));
3432   if (is_uint12(opnd.offset()))
3433     cl(dst, opnd);
3434   else
3435     cly(dst, opnd);
3436 }
3437 
3438 // Compare Logical Pointer Sized Register vs Memory
3439 void TurboAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
3440   // make sure offset is within 20 bit range
3441   DCHECK(is_int20(opnd.offset()));
3442 #if V8_TARGET_ARCH_S390X
3443   clg(dst, opnd);
3444 #else
3445   CmpLogical32(dst, opnd);
3446 #endif
3447 }
3448 
3449 // Compare Logical Byte (Mem - Imm)
3450 void TurboAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
3451   DCHECK(is_uint8(imm.immediate()));
3452   if (is_uint12(mem.offset()))
3453     cli(mem, imm);
3454   else
3455     cliy(mem, imm);
3456 }
3457 
3458 void TurboAssembler::Branch(Condition c, const Operand& opnd) {
3459   intptr_t value = opnd.immediate();
3460   if (is_int16(value))
3461     brc(c, opnd);
3462   else
3463     brcl(c, opnd);
3464 }
3465 
3466 // Branch On Count.  Decrement R1, and branch if R1 != 0.
3467 void TurboAssembler::BranchOnCount(Register r1, Label* l) {
3468   int32_t offset = branch_offset(l);
3469   if (is_int16(offset)) {
3470 #if V8_TARGET_ARCH_S390X
3471     brctg(r1, Operand(offset));
3472 #else
3473     brct(r1, Operand(offset));
3474 #endif
3475   } else {
3476     AddP(r1, Operand(-1));
3477     Branch(ne, Operand(offset));
3478   }
3479 }
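
// Usage sketch (illustrative; hypothetical register and count): emit a
// loop whose body runs `n` times.
//   LoadIntLiteral(r3, n);     // r3 is the loop counter
//   Label loop;
//   bind(&loop);
//   // ... loop body ...
//   BranchOnCount(r3, &loop);  // r3--; branch back while r3 != 0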
3480 
3481 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
3482   Load(dst, Operand(value));
3483 }
3484 
3485 void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
3486   intptr_t value = static_cast<intptr_t>(smi.ptr());
3487 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3488   llilf(dst, Operand(value));
3489 #else
3490   DCHECK_EQ(value & 0xFFFFFFFF, 0);
3491   // The smi value is loaded into the upper 32 bits; the lower 32 bits are zeros.
3492   llihf(dst, Operand(value >> 32));
3493 #endif
3494 }
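
// Encoding sketch (illustrative, assuming the usual V8 smi tagging): with
// pointer compression or 31-bit smis, Smi(5).ptr() == 5 << 1 == 0xA and a
// single llilf suffices. With full 64-bit smis, Smi(5).ptr() == 5 << 32,
// so llihf places the payload in the upper half and zeroes the lower half.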
3495 
3496 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
3497                                        Register scratch) {
3498   uint32_t hi_32 = value >> 32;
3499   uint32_t lo_32 = static_cast<uint32_t>(value);
3500 
3501   // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
3502   if (value == 0) {
3503     lzdr(result);
3504   } else if (lo_32 == 0) {
3505     llihf(scratch, Operand(hi_32));
3506     ldgr(result, scratch);
3507   } else {
3508     iihf(scratch, Operand(hi_32));
3509     iilf(scratch, Operand(lo_32));
3510     ldgr(result, scratch);
3511   }
3512 }
3513 
3514 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
3515                                        Register scratch) {
3516   uint64_t int_val = bit_cast<uint64_t, double>(value);
3517   LoadDoubleLiteral(result, int_val, scratch);
3518 }
3519 
3520 void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
3521                                         Register scratch) {
3522   uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
3523                      << 32;
3524   LoadDoubleLiteral(result, int_val, scratch);
3525 }
3526 
3527 void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
3528 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3529   // CFI takes 32-bit immediate.
3530   cfi(src1, Operand(smi));
3531 #else
3532   if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3533     cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
3534   } else {
3535     LoadSmiLiteral(scratch, smi);
3536     cgr(src1, scratch);
3537   }
3538 #endif
3539 }
3540 
3541 // Load a "pointer" sized value from the memory location
3542 void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
3543                            Register scratch) {
3544   int offset = mem.offset();
3545 
3546 #if V8_TARGET_ARCH_S390X
3547   MemOperand src = mem;
3548   if (!is_int20(offset)) {
3549     DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
3550     DCHECK(scratch != mem.rb());
3551     LoadIntLiteral(scratch, offset);
3552     src = MemOperand(mem.rb(), scratch);
3553   }
3554   lg(dst, src);
3555 #else
3556   if (is_uint12(offset)) {
3557     l(dst, mem);
3558   } else if (is_int20(offset)) {
3559     ly(dst, mem);
3560   } else {
3561     DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
3562     DCHECK(scratch != mem.rb());
3563     LoadIntLiteral(scratch, offset);
3564     l(dst, MemOperand(mem.rb(), scratch));
3565   }
3566 #endif
3567 }
3568 
3569 // Store a "pointer" sized value to the memory location
3570 void TurboAssembler::StoreP(Register src, const MemOperand& mem,
3571                             Register scratch) {
3572   if (!is_int20(mem.offset())) {
3573     DCHECK(scratch != no_reg);
3574     DCHECK(scratch != r0);
3575     LoadIntLiteral(scratch, mem.offset());
3576 #if V8_TARGET_ARCH_S390X
3577     stg(src, MemOperand(mem.rb(), scratch));
3578 #else
3579     st(src, MemOperand(mem.rb(), scratch));
3580 #endif
3581   } else {
3582 #if V8_TARGET_ARCH_S390X
3583     stg(src, mem);
3584 #else
3585     // StoreW will try to generate ST if offset fits, otherwise
3586     // it'll generate STY.
3587     StoreW(src, mem);
3588 #endif
3589   }
3590 }
3591 
3592 // Store a "pointer" sized constant to the memory location
3593 void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
3594                             Register scratch) {
3595   // Relocations not supported
3596   DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
3597 
3598   // Try to use MVGHI/MVHI
3599   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
3600       mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
3601 #if V8_TARGET_ARCH_S390X
3602     mvghi(mem, opnd);
3603 #else
3604     mvhi(mem, opnd);
3605 #endif
3606   } else {
3607     LoadImmP(scratch, opnd);
3608     StoreP(scratch, mem);
3609   }
3610 }
3611 
3612 void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
3613                                    const MemOperand& mem) {
3614 #if V8_TARGET_ARCH_S390X
3615   DCHECK(is_int20(mem.offset()));
3616   lmg(dst1, dst2, mem);
3617 #else
3618   if (is_uint12(mem.offset())) {
3619     lm(dst1, dst2, mem);
3620   } else {
3621     DCHECK(is_int20(mem.offset()));
3622     lmy(dst1, dst2, mem);
3623   }
3624 #endif
3625 }
3626 
3627 void TurboAssembler::StoreMultipleP(Register src1, Register src2,
3628                                     const MemOperand& mem) {
3629 #if V8_TARGET_ARCH_S390X
3630   DCHECK(is_int20(mem.offset()));
3631   stmg(src1, src2, mem);
3632 #else
3633   if (is_uint12(mem.offset())) {
3634     stm(src1, src2, mem);
3635   } else {
3636     DCHECK(is_int20(mem.offset()));
3637     stmy(src1, src2, mem);
3638   }
3639 #endif
3640 }
3641 
3642 void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
3643                                    const MemOperand& mem) {
3644   if (is_uint12(mem.offset())) {
3645     lm(dst1, dst2, mem);
3646   } else {
3647     DCHECK(is_int20(mem.offset()));
3648     lmy(dst1, dst2, mem);
3649   }
3650 }
3651 
3652 void TurboAssembler::StoreMultipleW(Register src1, Register src2,
3653                                     const MemOperand& mem) {
3654   if (is_uint12(mem.offset())) {
3655     stm(src1, src2, mem);
3656   } else {
3657     DCHECK(is_int20(mem.offset()));
3658     stmy(src1, src2, mem);
3659   }
3660 }
3661 
3662 // Load 32-bits and sign extend if necessary.
3663 void TurboAssembler::LoadW(Register dst, Register src) {
3664 #if V8_TARGET_ARCH_S390X
3665   lgfr(dst, src);
3666 #else
3667   if (dst != src) lr(dst, src);
3668 #endif
3669 }
3670 
3671 // Load 32-bits and sign extend if necessary.
3672 void TurboAssembler::LoadW(Register dst, const MemOperand& mem,
3673                            Register scratch) {
3674   int offset = mem.offset();
3675 
3676   if (!is_int20(offset)) {
3677     DCHECK(scratch != no_reg);
3678     LoadIntLiteral(scratch, offset);
3679 #if V8_TARGET_ARCH_S390X
3680     lgf(dst, MemOperand(mem.rb(), scratch));
3681 #else
3682     l(dst, MemOperand(mem.rb(), scratch));
3683 #endif
3684   } else {
3685 #if V8_TARGET_ARCH_S390X
3686     lgf(dst, mem);
3687 #else
3688     if (is_uint12(offset)) {
3689       l(dst, mem);
3690     } else {
3691       ly(dst, mem);
3692     }
3693 #endif
3694   }
3695 }
3696 
3697 // Load 32-bits and zero extend if necessary.
3698 void TurboAssembler::LoadlW(Register dst, Register src) {
3699 #if V8_TARGET_ARCH_S390X
3700   llgfr(dst, src);
3701 #else
3702   if (dst != src) lr(dst, src);
3703 #endif
3704 }
3705 
3706 // Variable length depending on whether the offset fits into the immediate
3707 // field; MemOperand of RX or RXY format.
3708 void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
3709                             Register scratch) {
3710   Register base = mem.rb();
3711   int offset = mem.offset();
3712 
3713 #if V8_TARGET_ARCH_S390X
3714   if (is_int20(offset)) {
3715     llgf(dst, mem);
3716   } else if (scratch != no_reg) {
3717     // Materialize offset into scratch register.
3718     LoadIntLiteral(scratch, offset);
3719     llgf(dst, MemOperand(base, scratch));
3720   } else {
3721     DCHECK(false);
3722   }
3723 #else
3724   bool use_RXform = false;
3725   bool use_RXYform = false;
3726   if (is_uint12(offset)) {
3727     // RX-format supports unsigned 12-bits offset.
3728     use_RXform = true;
3729   } else if (is_int20(offset)) {
3730     // RXY-format supports signed 20-bits offset.
3731     use_RXYform = true;
3732   } else if (scratch != no_reg) {
3733     // Materialize offset into scratch register.
3734     LoadIntLiteral(scratch, offset);
3735   } else {
3736     DCHECK(false);
3737   }
3738 
3739   if (use_RXform) {
3740     l(dst, mem);
3741   } else if (use_RXYform) {
3742     ly(dst, mem);
3743   } else {
3744     ly(dst, MemOperand(base, scratch));
3745   }
3746 #endif
3747 }
3748 
3749 void TurboAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
3750 #if V8_TARGET_ARCH_S390X
3751   llgh(dst, mem);
3752 #else
3753   llh(dst, mem);
3754 #endif
3755 }
3756 
3757 void TurboAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
3758 #if V8_TARGET_ARCH_S390X
3759   llghr(dst, src);
3760 #else
3761   llhr(dst, src);
3762 #endif
3763 }
3764 
3765 void TurboAssembler::LoadB(Register dst, const MemOperand& mem) {
3766 #if V8_TARGET_ARCH_S390X
3767   lgb(dst, mem);
3768 #else
3769   lb(dst, mem);
3770 #endif
3771 }
3772 
3773 void TurboAssembler::LoadB(Register dst, Register src) {
3774 #if V8_TARGET_ARCH_S390X
3775   lgbr(dst, src);
3776 #else
3777   lbr(dst, src);
3778 #endif
3779 }
3780 
3781 void TurboAssembler::LoadlB(Register dst, const MemOperand& mem) {
3782 #if V8_TARGET_ARCH_S390X
3783   llgc(dst, mem);
3784 #else
3785   llc(dst, mem);
3786 #endif
3787 }
3788 
3789 void TurboAssembler::LoadlB(Register dst, Register src) {
3790 #if V8_TARGET_ARCH_S390X
3791   llgcr(dst, src);
3792 #else
3793   llcr(dst, src);
3794 #endif
3795 }
3796 
3797 void TurboAssembler::LoadLogicalReversedWordP(Register dst,
3798                                               const MemOperand& mem) {
3799   lrv(dst, mem);
3800   LoadlW(dst, dst);
3801 }
3802 
3803 void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
3804                                                   const MemOperand& mem) {
3805   lrvh(dst, mem);
3806   LoadLogicalHalfWordP(dst, dst);
3807 }
3808 
3809 // Load And Test (Reg <- Reg)
3810 void TurboAssembler::LoadAndTest32(Register dst, Register src) {
3811   ltr(dst, src);
3812 }
3813 
3814 // Load And Test
3815 //     (Register dst(ptr) = Register src (32 | 32->64))
3816 // src is treated as a 32-bit signed integer, which is sign extended to
3817 // 64-bit if necessary.
3818 void TurboAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
3819 #if V8_TARGET_ARCH_S390X
3820   ltgfr(dst, src);
3821 #else
3822   ltr(dst, src);
3823 #endif
3824 }
3825 
3826 // Load And Test Pointer Sized (Reg <- Reg)
3827 void TurboAssembler::LoadAndTestP(Register dst, Register src) {
3828 #if V8_TARGET_ARCH_S390X
3829   ltgr(dst, src);
3830 #else
3831   ltr(dst, src);
3832 #endif
3833 }
3834 
3835 // Load And Test 32-bit (Reg <- Mem)
3836 void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
3837   lt_z(dst, mem);
3838 }
3839 
3840 // Load And Test Pointer Sized (Reg <- Mem)
3841 void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
3842 #if V8_TARGET_ARCH_S390X
3843   ltg(dst, mem);
3844 #else
3845   lt_z(dst, mem);
3846 #endif
3847 }
3848 
3849 // Load On Condition Pointer Sized (Reg <- Reg)
3850 void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
3851                                       Register src) {
3852 #if V8_TARGET_ARCH_S390X
3853   locgr(cond, dst, src);
3854 #else
3855   locr(cond, dst, src);
3856 #endif
3857 }
3858 
3859 // Load Double Precision (64-bit) Floating Point number from memory
3860 void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
3861   // On both 32-bit and 64-bit targets we use 64-bit floating point regs.
3862   if (is_uint12(mem.offset())) {
3863     ld(dst, mem);
3864   } else {
3865     ldy(dst, mem);
3866   }
3867 }
3868 
3869 // Load Single Precision (32-bit) Floating Point number from memory
3870 void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
3871   if (is_uint12(mem.offset())) {
3872     le_z(dst, mem);
3873   } else {
3874     DCHECK(is_int20(mem.offset()));
3875     ley(dst, mem);
3876   }
3877 }
3878 
3879 // Load Single Precision (32-bit) Floating Point number from memory,
3880 // and convert to Double Precision (64-bit)
3881 void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
3882                                                 const MemOperand& mem) {
3883   LoadFloat32(dst, mem);
3884   ldebr(dst, dst);
3885 }
3886 
3887 void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
3888                                  Register scratch) {
3889   if (is_uint12(mem.offset())) {
3890     vl(dst, mem, Condition(0));
3891   } else {
3892     DCHECK(is_int20(mem.offset()));
3893     lay(scratch, mem);
3894     vl(dst, MemOperand(scratch), Condition(0));
3895   }
3896 }
3897 
3898 // Store Double Precision (64-bit) Floating Point number to memory
3899 void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
3900   if (is_uint12(mem.offset())) {
3901     std(dst, mem);
3902   } else {
3903     stdy(dst, mem);
3904   }
3905 }
3906 
3907 // Store Single Precision (32-bit) Floating Point number to memory
3908 void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
3909   if (is_uint12(mem.offset())) {
3910     ste(src, mem);
3911   } else {
3912     stey(src, mem);
3913   }
3914 }
3915 
3916 // Convert Double precision (64-bit) to Single Precision (32-bit)
3917 // and store resulting Float32 to memory
3918 void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
3919                                           const MemOperand& mem,
3920                                           DoubleRegister scratch) {
3921   ledbr(scratch, src);
3922   StoreFloat32(scratch, mem);
3923 }
3924 
3925 void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
3926                                   Register scratch) {
3927   if (is_uint12(mem.offset())) {
3928     vst(src, mem, Condition(0));
3929   } else {
3930     DCHECK(is_int20(mem.offset()));
3931     lay(scratch, mem);
3932     vst(src, MemOperand(scratch), Condition(0));
3933   }
3934 }
3935 
3936 void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
3937                                 DoubleRegister scratch) {
3938   if (is_uint12(opnd.offset())) {
3939     aeb(dst, opnd);
3940   } else {
3941     ley(scratch, opnd);
3942     aebr(dst, scratch);
3943   }
3944 }
3945 
3946 void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
3947                                 DoubleRegister scratch) {
3948   if (is_uint12(opnd.offset())) {
3949     adb(dst, opnd);
3950   } else {
3951     ldy(scratch, opnd);
3952     adbr(dst, scratch);
3953   }
3954 }
3955 
3956 void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
3957                                 DoubleRegister scratch) {
3958   if (is_uint12(opnd.offset())) {
3959     seb(dst, opnd);
3960   } else {
3961     ley(scratch, opnd);
3962     sebr(dst, scratch);
3963   }
3964 }
3965 
3966 void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
3967                                 DoubleRegister scratch) {
3968   if (is_uint12(opnd.offset())) {
3969     sdb(dst, opnd);
3970   } else {
3971     ldy(scratch, opnd);
3972     sdbr(dst, scratch);
3973   }
3974 }
3975 
3976 void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
3977                                 DoubleRegister scratch) {
3978   if (is_uint12(opnd.offset())) {
3979     meeb(dst, opnd);
3980   } else {
3981     ley(scratch, opnd);
3982     meebr(dst, scratch);
3983   }
3984 }
3985 
3986 void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
3987                                 DoubleRegister scratch) {
3988   if (is_uint12(opnd.offset())) {
3989     mdb(dst, opnd);
3990   } else {
3991     ldy(scratch, opnd);
3992     mdbr(dst, scratch);
3993   }
3994 }
3995 
3996 void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
3997                                 DoubleRegister scratch) {
3998   if (is_uint12(opnd.offset())) {
3999     deb(dst, opnd);
4000   } else {
4001     ley(scratch, opnd);
4002     debr(dst, scratch);
4003   }
4004 }
4005 
4006 void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
4007                                 DoubleRegister scratch) {
4008   if (is_uint12(opnd.offset())) {
4009     ddb(dst, opnd);
4010   } else {
4011     ldy(scratch, opnd);
4012     ddbr(dst, scratch);
4013   }
4014 }
4015 
4016 void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
4017                                          const MemOperand& opnd,
4018                                          DoubleRegister scratch) {
4019   if (is_uint12(opnd.offset())) {
4020     ldeb(dst, opnd);
4021   } else {
4022     ley(scratch, opnd);
4023     ldebr(dst, scratch);
4024   }
4025 }
4026 
4027 // Variable length depending on whether the offset fits into the immediate
4028 // field; MemOperand of RX or RXY format.
4029 void TurboAssembler::StoreW(Register src, const MemOperand& mem,
4030                             Register scratch) {
4031   Register base = mem.rb();
4032   int offset = mem.offset();
4033 
4034   bool use_RXform = false;
4035   bool use_RXYform = false;
4036 
4037   if (is_uint12(offset)) {
4038     // RX-format supports unsigned 12-bits offset.
4039     use_RXform = true;
4040   } else if (is_int20(offset)) {
4041     // RXY-format supports signed 20-bits offset.
4042     use_RXYform = true;
4043   } else if (scratch != no_reg) {
4044     // Materialize offset into scratch register.
4045     LoadIntLiteral(scratch, offset);
4046   } else {
4047     // scratch is no_reg
4048     DCHECK(false);
4049   }
4050 
4051   if (use_RXform) {
4052     st(src, mem);
4053   } else if (use_RXYform) {
4054     sty(src, mem);
4055   } else {
4056     StoreW(src, MemOperand(base, scratch));
4057   }
4058 }
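
// Offset-form selection (illustrative offsets): 0x800 fits the unsigned
// 12-bit RX field -> ST; 0x40000 still fits the signed 20-bit RXY field
// -> STY; 0x100000 fits neither, so the offset is materialized into
// scratch and the store is re-issued with (base, scratch) addressing.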
4059 
4060 void TurboAssembler::LoadHalfWordP(Register dst, Register src) {
4061 #if V8_TARGET_ARCH_S390X
4062   lghr(dst, src);
4063 #else
4064   lhr(dst, src);
4065 #endif
4066 }
4067 
4068 // Loads a 16-bit half-word value from memory and sign extends it to a
4069 // pointer-sized register.
4070 void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
4071                                    Register scratch) {
4072   Register base = mem.rb();
4073   int offset = mem.offset();
4074 
4075   if (!is_int20(offset)) {
4076     DCHECK(scratch != no_reg);
4077     LoadIntLiteral(scratch, offset);
4078 #if V8_TARGET_ARCH_S390X
4079     lgh(dst, MemOperand(base, scratch));
4080 #else
4081     lh(dst, MemOperand(base, scratch));
4082 #endif
4083   } else {
4084 #if V8_TARGET_ARCH_S390X
4085     lgh(dst, mem);
4086 #else
4087     if (is_uint12(offset)) {
4088       lh(dst, mem);
4089     } else {
4090       lhy(dst, mem);
4091     }
4092 #endif
4093   }
4094 }
4095 
4096 // Variable length depending on whether the offset fits into the immediate
4097 // field; MemOperand currently only supports d-form.
4098 void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4099                                    Register scratch) {
4100   Register base = mem.rb();
4101   int offset = mem.offset();
4102 
4103   if (is_uint12(offset)) {
4104     sth(src, mem);
4105   } else if (is_int20(offset)) {
4106     sthy(src, mem);
4107   } else {
4108     DCHECK(scratch != no_reg);
4109     LoadIntLiteral(scratch, offset);
4110     sth(src, MemOperand(base, scratch));
4111   }
4112 }
4113 
4114 // Variable length depending on whether the offset fits into the immediate
4115 // field; MemOperand currently only supports d-form.
4116 void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
4117                                Register scratch) {
4118   Register base = mem.rb();
4119   int offset = mem.offset();
4120 
4121   if (is_uint12(offset)) {
4122     stc(src, mem);
4123   } else if (is_int20(offset)) {
4124     stcy(src, mem);
4125   } else {
4126     DCHECK(scratch != no_reg);
4127     LoadIntLiteral(scratch, offset);
4128     stc(src, MemOperand(base, scratch));
4129   }
4130 }
4131 
4132 // Shift left logical for 32-bit integer types.
4133 void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
4134   if (dst == src) {
4135     sll(dst, val);
4136   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4137     sllk(dst, src, val);
4138   } else {
4139     lr(dst, src);
4140     sll(dst, val);
4141   }
4142 }
4143 
4144 // Shift left logical for 32-bit integer types.
4145 void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
4146   if (dst == src) {
4147     sll(dst, val);
4148   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4149     sllk(dst, src, val);
4150   } else {
4151     DCHECK(dst != val);  // The lr/sll path clobbers val.
4152     lr(dst, src);
4153     sll(dst, val);
4154   }
4155 }
4156 
4157 // Shift right logical for 32-bit integer types.
4158 void TurboAssembler::ShiftRight(Register dst, Register src,
4159                                 const Operand& val) {
4160   if (dst == src) {
4161     srl(dst, val);
4162   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4163     srlk(dst, src, val);
4164   } else {
4165     lr(dst, src);
4166     srl(dst, val);
4167   }
4168 }
4169 
4170 // Shift right logical for 32-bit integer types.
4171 void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
4172   if (dst == src) {
4173     srl(dst, val);
4174   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4175     srlk(dst, src, val);
4176   } else {
4177     DCHECK(dst != val);  // The lr/srl path clobbers val.
4178     lr(dst, src);
4179     srl(dst, val);
4180   }
4181 }
4182 
4183 // Shift left arithmetic for 32-bit integer types.
4184 void TurboAssembler::ShiftLeftArith(Register dst, Register src,
4185                                     const Operand& val) {
4186   if (dst == src) {
4187     sla(dst, val);
4188   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4189     slak(dst, src, val);
4190   } else {
4191     lr(dst, src);
4192     sla(dst, val);
4193   }
4194 }
4195 
4196 // Shift left arithmetic for 32-bit integer types.
4197 void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
4198   if (dst == src) {
4199     sla(dst, val);
4200   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4201     slak(dst, src, val);
4202   } else {
4203     DCHECK(dst != val);  // The lr/sla path clobbers val.
4204     lr(dst, src);
4205     sla(dst, val);
4206   }
4207 }
4208 
4209 // Shift right arithmetic for 32-bit integer types.
4210 void TurboAssembler::ShiftRightArith(Register dst, Register src,
4211                                      const Operand& val) {
4212   if (dst == src) {
4213     sra(dst, val);
4214   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4215     srak(dst, src, val);
4216   } else {
4217     lr(dst, src);
4218     sra(dst, val);
4219   }
4220 }
4221 
4222 // Shift right arithmetic for 32-bit integer types.
4223 void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
4224   if (dst == src) {
4225     sra(dst, val);
4226   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4227     srak(dst, src, val);
4228   } else {
4229     DCHECK(dst != val);  // The lr/sra path clobbers val.
4230     lr(dst, src);
4231     sra(dst, val);
4232   }
4233 }
4234 
4235 // Clear the rightmost # of bits.
4236 void TurboAssembler::ClearRightImm(Register dst, Register src,
4237                                    const Operand& val) {
4238   int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
4239 
4240   // Try to use RISBG if possible
4241   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4242     int endBit = 63 - numBitsToClear;
4243     RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
4244                            Operand::Zero(), true);
4245     return;
4246   }
4247 
4248   uint64_t hexMask = ~((1L << numBitsToClear) - 1);
4249 
4250   // S390 AND instr clobbers source.  Make a copy if necessary
4251   if (dst != src) LoadRR(dst, src);
4252 
4253   if (numBitsToClear <= 16) {
4254     nill(dst, Operand(static_cast<uint16_t>(hexMask)));
4255   } else if (numBitsToClear <= 32) {
4256     nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
4257   } else if (numBitsToClear <= 64) {
4258     nilf(dst, Operand(static_cast<intptr_t>(0)));
4259     nihf(dst, Operand(hexMask >> 32));
4260   }
4261 }
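
// Worked example (illustrative): ClearRightImm(dst, src, Operand(12))
// clears the low 12 bits. With GENERAL_INSTR_EXT, RISBG selects bits
// 0..51 (endBit = 63 - 12) and zeroes the rest. Otherwise hexMask becomes
// ~0xFFF and, since 12 <= 16, a single nill with 0xF000 does the job.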
4262 
4263 void TurboAssembler::Popcnt32(Register dst, Register src) {
4264   DCHECK(src != r0);
4265   DCHECK(dst != r0);
4266 
4267   popcnt(dst, src);
4268   ShiftRight(r0, dst, Operand(16));
4269   ar(dst, r0);
4270   ShiftRight(r0, dst, Operand(8));
4271   ar(dst, r0);
4272   llgcr(dst, dst);
4273 }
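
// Fold sketch (illustrative): POPCNT sets each destination byte to the
// population count of the corresponding source byte, so for
// src = 0xF00F0301, dst starts as 0x04040201. Adding dst >> 16 and then
// dst >> 8 accumulates all four byte counts into the low byte, and llgcr
// extracts it: 4 + 4 + 2 + 1 = 11.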
4274 
4275 #ifdef V8_TARGET_ARCH_S390X
4276 void TurboAssembler::Popcnt64(Register dst, Register src) {
4277   DCHECK(src != r0);
4278   DCHECK(dst != r0);
4279 
4280   popcnt(dst, src);
4281   ShiftRightP(r0, dst, Operand(32));
4282   AddP(dst, r0);
4283   ShiftRightP(r0, dst, Operand(16));
4284   AddP(dst, r0);
4285   ShiftRightP(r0, dst, Operand(8));
4286   AddP(dst, r0);
4287   LoadlB(dst, dst);
4288 }
4289 #endif
4290 
4291 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
4292   if (src == dst) return;
4293   DCHECK(!AreAliased(src, dst, scratch));
4294   LoadRR(scratch, src);
4295   LoadRR(src, dst);
4296   LoadRR(dst, scratch);
4297 }
4298 
4299 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
4300   if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
4301   if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
4302   DCHECK(!AreAliased(src, scratch));
4303   LoadRR(scratch, src);
4304   LoadP(src, dst);
4305   StoreP(scratch, dst);
4306 }
4307 
4308 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
4309                            Register scratch_1) {
4310   if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
4311   if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
4312   if (dst.rx() != r0) DCHECK(!AreAliased(dst.rx(), scratch_0, scratch_1));
4313   if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
4314   DCHECK(!AreAliased(scratch_0, scratch_1));
4315   LoadP(scratch_0, src);
4316   LoadP(scratch_1, dst);
4317   StoreP(scratch_0, dst);
4318   StoreP(scratch_1, src);
4319 }
4320 
4321 void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
4322                                  DoubleRegister scratch) {
4323   if (src == dst) return;
4324   DCHECK(!AreAliased(src, dst, scratch));
4325   ldr(scratch, src);
4326   ldr(src, dst);
4327   ldr(dst, scratch);
4328 }
4329 
4330 void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
4331                                  DoubleRegister scratch) {
4332   DCHECK(!AreAliased(src, scratch));
4333   ldr(scratch, src);
4334   LoadFloat32(src, dst);
4335   StoreFloat32(scratch, dst);
4336 }
4337 
SwapFloat32(MemOperand src,MemOperand dst,DoubleRegister scratch)4338 void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
4339                                  DoubleRegister scratch) {
4340   // push d0, to be used as scratch
4341   lay(sp, MemOperand(sp, -kDoubleSize));
4342   StoreDouble(d0, MemOperand(sp));
4343   LoadFloat32(scratch, src);
4344   LoadFloat32(d0, dst);
4345   StoreFloat32(scratch, dst);
4346   StoreFloat32(d0, src);
4347   // restore d0
4348   LoadDouble(d0, MemOperand(sp));
4349   lay(sp, MemOperand(sp, kDoubleSize));
4350 }
4351 
SwapDouble(DoubleRegister src,DoubleRegister dst,DoubleRegister scratch)4352 void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
4353                                 DoubleRegister scratch) {
4354   if (src == dst) return;
4355   DCHECK(!AreAliased(src, dst, scratch));
4356   ldr(scratch, src);
4357   ldr(src, dst);
4358   ldr(dst, scratch);
4359 }
4360 
SwapDouble(DoubleRegister src,MemOperand dst,DoubleRegister scratch)4361 void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
4362                                 DoubleRegister scratch) {
4363   DCHECK(!AreAliased(src, scratch));
4364   ldr(scratch, src);
4365   LoadDouble(src, dst);
4366   StoreDouble(scratch, dst);
4367 }
4368 
SwapDouble(MemOperand src,MemOperand dst,DoubleRegister scratch)4369 void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
4370                                 DoubleRegister scratch) {
4371   // push d0, to be used as scratch
4372   lay(sp, MemOperand(sp, -kDoubleSize));
4373   StoreDouble(d0, MemOperand(sp));
4374   LoadDouble(scratch, src);
4375   LoadDouble(d0, dst);
4376   StoreDouble(scratch, dst);
4377   StoreDouble(d0, src);
4378   // restore d0
4379   LoadDouble(d0, MemOperand(sp));
4380   lay(sp, MemOperand(sp, kDoubleSize));
4381 }
4382 
SwapSimd128(Simd128Register src,Simd128Register dst,Simd128Register scratch)4383 void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
4384                                  Simd128Register scratch) {
4385   if (src == dst) return;
4386   vlr(scratch, src, Condition(0), Condition(0), Condition(0));
4387   vlr(src, dst, Condition(0), Condition(0), Condition(0));
4388   vlr(dst, scratch, Condition(0), Condition(0), Condition(0));
4389 }
4390 
SwapSimd128(Simd128Register src,MemOperand dst,Simd128Register scratch)4391 void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
4392                                  Simd128Register scratch) {
4393   DCHECK(!AreAliased(src, scratch));
4394   vlr(scratch, src, Condition(0), Condition(0), Condition(0));
4395   LoadSimd128(src, dst, ip);
4396   StoreSimd128(scratch, dst, ip);
4397 }
4398 
SwapSimd128(MemOperand src,MemOperand dst,Simd128Register scratch)4399 void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
4400                                  Simd128Register scratch) {
4401   // push d0, to be used as scratch
4402   lay(sp, MemOperand(sp, -kSimd128Size));
4403   StoreSimd128(d0, MemOperand(sp), ip);
4404   LoadSimd128(scratch, src, ip);
4405   LoadSimd128(d0, dst, ip);
4406   StoreSimd128(scratch, dst, ip);
4407   StoreSimd128(d0, src, ip);
4408   // restore d0
4409   LoadSimd128(d0, MemOperand(sp), ip);
4410   lay(sp, MemOperand(sp, kSimd128Size));
4411 }
4412 
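// A poison mask of -1 (all ones) is the neutral value: loads masked with the
// speculation poison register pass through unchanged.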
void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

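// LARL's immediate is a halfword count, so the byte offset from the start of
// the code object (pc_offset()) is divided by 2 to address its first
// instruction.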
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  larl(dst, Operand(-pc_offset() / 2));
}

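// Loads the address of the instruction following the LARL into dst, by
// binding the label immediately after it.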
void TurboAssembler::LoadPC(Register dst) {
  Label current_pc;
  larl(dst, &current_pc);
  bind(&current_pc);
}

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  Cmp32(x, Operand(y));
  beq(dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  Cmp32(x, Operand(y));
  blt(dest);
}

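// Replaces a Smi-tagged builtin index in builtin_index with that builtin's
// entry point, loaded from the isolate's builtin entry table.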
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  STATIC_ASSERT(kSystemPointerSize == 8);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  // The builtin_index register contains the builtin index as a Smi.
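  // Rescale the tagged value (index << kSmiShift) into a byte offset
  // (index * kSystemPointerSize) by the difference between the two shifts.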
  if (SmiValuesAre32Bits()) {
    ShiftRightArithP(builtin_index, builtin_index,
                     Operand(kSmiShift - kSystemPointerSizeLog2));
  } else {
    DCHECK(SmiValuesAre31Bits());
    ShiftLeftP(builtin_index, builtin_index,
               Operand(kSystemPointerSizeLog2 - kSmiShift));
  }
  LoadP(builtin_index, MemOperand(kRootRegister, builtin_index,
                                  IsolateData::builtin_entry_table_offset()));
}

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  LoadEntryFromBuiltinIndex(builtin_index);
  Call(builtin_index);
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    Register scratch = r1;

    DCHECK(!AreAliased(destination, scratch));
    DCHECK(!AreAliased(code_object, scratch));

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
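    // IsOffHeapTrampoline is encoded in the upper halfword of the 32-bit
    // flags field, hence the >> 16 when testing it with tmlh.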
    LoadW(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
    tmlh(scratch, Operand(Code::IsOffHeapTrampoline::kMask >> 16));
    bne(&if_code_is_off_heap);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
    b(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    LoadW(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
    ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
    AddP(destination, destination, kRootRegister);
    LoadP(destination,
          MemOperand(destination, IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Jump(code_object);
}

void TurboAssembler::StoreReturnAddressAndCall(Register target) {
  // This generates the final instruction sequence for calls to C functions
  // once an exit frame has been constructed.
  //
  // Note that this assumes the caller code (i.e. the Code object currently
  // being generated) is immovable or that the callee function cannot trigger
  // GC, since the callee function will return to it.

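  // Compute the return address with LARL up front, store it in the frame's
  // return-address slot, and then plain-branch to the target; the callee
  // returns through the stored address.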
  Label return_label;
  larl(r14, &return_label);  // Generate the return addr of call later.
  StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));

  // zLinux ABI requires the caller's frame to have sufficient space for the
  // callee-preserved register save area.
  b(target);
  bind(&return_label);
}

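// Calls the deoptimization builtin through its slot in the builtin entry
// table and DCHECKs that the deopt exit sequence has the fixed size the
// deoptimizer expects for the given DeoptimizeKind.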
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
                                           Label* exit, DeoptimizeKind kind,
                                           Label*) {
  LoadP(ip, MemOperand(kRootRegister,
                       IsolateData::builtin_entry_slot_offset(target)));
  Call(ip);
  DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
            (kind == DeoptimizeKind::kLazy)
                ? Deoptimizer::kLazyDeoptExitSize
                : Deoptimizer::kNonLazyDeoptExitSize);
  USE(exit, kind);
}

void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390