1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8 
9 #ifndef V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
10 #define V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
11 
12 #include "src/codegen/bailout-reason.h"
13 #include "src/codegen/s390/assembler-s390.h"
14 #include "src/common/globals.h"
15 #include "src/objects/contexts.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
21 
22 // ----------------------------------------------------------------------------
23 // Static helper functions
24 
25 // Generate a MemOperand for loading a field from an object.
26 inline MemOperand FieldMemOperand(Register object, int offset) {
27   return MemOperand(object, offset - kHeapObjectTag);
28 }
29 
30 // Generate a MemOperand for loading a field from an object.
31 inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
32   return MemOperand(object, index, offset - kHeapObjectTag);
33 }
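// Usage sketch (illustrative only; LoadU64 is declared further down in this
// file): tagged HeapObject pointers carry kHeapObjectTag in their low bits, so
// field accesses subtract the tag to form the real address, e.g.
//   __ LoadU64(dst, FieldMemOperand(object, HeapObject::kMapOffset));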
34 
35 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
36 
37 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
38                                    Register reg3 = no_reg,
39                                    Register reg4 = no_reg,
40                                    Register reg5 = no_reg,
41                                    Register reg6 = no_reg);
42 
43 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
44  public:
45   using TurboAssemblerBase::TurboAssemblerBase;
46 
47   void CallBuiltin(Builtin builtin);
48   void TailCallBuiltin(Builtin builtin);
49   void AtomicCmpExchangeHelper(Register addr, Register output,
50                                Register old_value, Register new_value,
51                                int start, int end, int shift_amount, int offset,
52                                Register temp0, Register temp1);
53   void AtomicCmpExchangeU8(Register addr, Register output, Register old_value,
54                            Register new_value, Register temp0, Register temp1);
55   void AtomicCmpExchangeU16(Register addr, Register output, Register old_value,
56                             Register new_value, Register temp0, Register temp1);
57   void AtomicExchangeHelper(Register addr, Register value, Register output,
58                             int start, int end, int shift_amount, int offset,
59                             Register scratch);
60   void AtomicExchangeU8(Register addr, Register value, Register output,
61                         Register scratch);
62   void AtomicExchangeU16(Register addr, Register value, Register output,
63                          Register scratch);
64 
65   void DoubleMax(DoubleRegister result_reg, DoubleRegister left_reg,
66                  DoubleRegister right_reg);
67   void DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg,
68                  DoubleRegister right_reg);
69   void FloatMax(DoubleRegister result_reg, DoubleRegister left_reg,
70                 DoubleRegister right_reg);
71   void FloatMin(DoubleRegister result_reg, DoubleRegister left_reg,
72                 DoubleRegister right_reg);
73   void CeilF32(DoubleRegister dst, DoubleRegister src);
74   void CeilF64(DoubleRegister dst, DoubleRegister src);
75   void FloorF32(DoubleRegister dst, DoubleRegister src);
76   void FloorF64(DoubleRegister dst, DoubleRegister src);
77   void TruncF32(DoubleRegister dst, DoubleRegister src);
78   void TruncF64(DoubleRegister dst, DoubleRegister src);
79   void NearestIntF32(DoubleRegister dst, DoubleRegister src);
80   void NearestIntF64(DoubleRegister dst, DoubleRegister src);
81 
82   void LoadFromConstantsTable(Register destination, int constant_index) final;
83   void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
84   void LoadRootRelative(Register destination, int32_t offset) final;
85 
86   // Jump, Call, and Ret pseudo instructions implementing inter-working.
87   void Jump(Register target, Condition cond = al);
88   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
89   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
90   void Jump(const ExternalReference& reference);
91   // Jump if the register contains a smi.
92   inline void JumpIfSmi(Register value, Label* smi_label) {
93     TestIfSmi(value);
94     beq(smi_label /*, cr0*/);  // branch if SMI
95   }
96   void JumpIfEqual(Register x, int32_t y, Label* dest);
97   void JumpIfLessThan(Register x, int32_t y, Label* dest);
98 
99   void LoadMap(Register destination, Register object);
100 
101   void Call(Register target);
102   void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
103   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
104             Condition cond = al);
105   void Ret() { b(r14); }
106   void Ret(Condition cond) { b(cond, r14); }
107 
108   void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
109                              DeoptimizeKind kind, Label* ret,
110                              Label* jump_deoptimization_entry_label);
111 
112   // Emit code to discard a non-negative number of pointer-sized elements
113   // from the stack, clobbering only the sp register.
114   void Drop(int count);
115   void Drop(Register count, Register scratch = r0);
116 
117   void Ret(int drop) {
118     Drop(drop);
119     Ret();
120   }
121 
122   void Call(Label* target);
123 
124   // Load the builtin given by the Smi in |builtin_index| into the same
125   // register.
126   void LoadEntryFromBuiltinIndex(Register builtin_index);
127   void LoadEntryFromBuiltin(Builtin builtin, Register destination);
128   MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
129   void LoadCodeObjectEntry(Register destination, Register code_object);
130   void CallCodeObject(Register code_object);
131   void JumpCodeObject(Register code_object,
132                       JumpMode jump_mode = JumpMode::kJump);
133 
134   void CallBuiltinByIndex(Register builtin_index);
135 
136   // Register move. May do nothing if the registers are identical.
137   void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
138   void Move(Register dst, Handle<HeapObject> source,
139             RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
140   void Move(Register dst, ExternalReference reference);
141   void Move(Register dst, const MemOperand& src);
142   void Move(Register dst, Register src, Condition cond = al);
143   void Move(DoubleRegister dst, DoubleRegister src);
144 
145   void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
146                 const Operand& length);
147 
148   void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
149                           const Operand& length);
150 
151   void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
152                        const Operand& length);
153 
154   void RotateInsertSelectBits(Register dst, Register src,
155                               const Operand& startBit, const Operand& endBit,
156                               const Operand& shiftAmt, bool zeroBits);
157 
158   void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
159 
160   void MaybeSaveRegisters(RegList registers);
161   void MaybeRestoreRegisters(RegList registers);
162 
163   void CallEphemeronKeyBarrier(Register object, Register slot_address,
164                                SaveFPRegsMode fp_mode);
165 
166   void CallRecordWriteStubSaveRegisters(
167       Register object, Register slot_address,
168       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
169       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
170   void CallRecordWriteStub(
171       Register object, Register slot_address,
172       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
173       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
174 
175   void MultiPush(RegList regs, Register location = sp);
176   void MultiPop(RegList regs, Register location = sp);
177 
178   void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
179   void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
180 
181   void MultiPushV128(DoubleRegList dregs, Register scratch,
182                      Register location = sp);
183   void MultiPopV128(DoubleRegList dregs, Register scratch,
184                     Register location = sp);
185 
186   void MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
187                           Register location = sp);
188   void MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
189                          Register location = sp);
190 
191   // Calculate how much stack space (in bytes) is required to store caller-saved
192   // registers, excluding those specified in the arguments.
193   int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
194                                       Register exclusion1 = no_reg,
195                                       Register exclusion2 = no_reg,
196                                       Register exclusion3 = no_reg) const;
197 
198   // Push caller-saved registers on the stack and return the number of bytes by
199   // which the stack pointer is adjusted.
200   int PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
201                       Register exclusion1 = no_reg,
202                       Register exclusion2 = no_reg,
203                       Register exclusion3 = no_reg);
204   // Restore caller-saved registers from the stack and return the number of
205   // bytes by which the stack pointer is adjusted.
206   int PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
207                      Register exclusion1 = no_reg, Register exclusion2 = no_reg,
208                      Register exclusion3 = no_reg);
209 
210   // Load an object from the root table.
211   void LoadRoot(Register destination, RootIndex index) override {
212     LoadRoot(destination, index, al);
213   }
214   void LoadRoot(Register destination, RootIndex index, Condition cond);
215   //--------------------------------------------------------------------------
216   // S390 Macro Assemblers for Instructions
217   //--------------------------------------------------------------------------
218 
219   // Arithmetic Operations
220 
221   // Add (Register - Immediate)
222   void AddS32(Register dst, const Operand& imm);
223   void AddS64(Register dst, const Operand& imm);
224   void AddS32(Register dst, Register src, const Operand& imm);
225   void AddS64(Register dst, Register src, const Operand& imm);
226   void AddS32(Register dst, Register src, int32_t imm);
227   void AddS64(Register dst, Register src, int32_t imm);
228 
229   // Add (Register - Register)
230   void AddS32(Register dst, Register src);
231   void AddS64(Register dst, Register src);
232   void AddS32(Register dst, Register src1, Register src2);
233   void AddS64(Register dst, Register src1, Register src2);
234 
235   // Add (Register - Mem)
236   void AddS32(Register dst, const MemOperand& opnd);
237   void AddS64(Register dst, const MemOperand& opnd);
238 
239   // Add (Mem - Immediate)
240   void AddS32(const MemOperand& opnd, const Operand& imm);
241   void AddS64(const MemOperand& opnd, const Operand& imm);
242 
243   // Add Logical (Register - Register)
244   void AddU32(Register dst, Register src1, Register src2);
245 
246   // Add Logical (Register - Immediate)
247   void AddU32(Register dst, const Operand& imm);
248   void AddU64(Register dst, const Operand& imm);
249   void AddU64(Register dst, Register src1, Register src2);
250 
251   // Add Logical (Register - Mem)
252   void AddU32(Register dst, const MemOperand& opnd);
253   void AddU64(Register dst, const MemOperand& opnd);
254 
255   // Subtract (Register - Immediate)
256   void SubS32(Register dst, const Operand& imm);
257   void SubS64(Register dst, const Operand& imm);
258   void SubS32(Register dst, Register src, const Operand& imm);
259   void SubS64(Register dst, Register src, const Operand& imm);
260   void SubS32(Register dst, Register src, int32_t imm);
261   void SubS64(Register dst, Register src, int32_t imm);
262 
263   // Subtract (Register - Register)
264   void SubS32(Register dst, Register src);
265   void SubS64(Register dst, Register src);
266   void SubS32(Register dst, Register src1, Register src2);
267   void SubS64(Register dst, Register src1, Register src2);
268 
269   // Subtract (Register - Mem)
270   void SubS32(Register dst, const MemOperand& opnd);
271   void SubS64(Register dst, const MemOperand& opnd);
272   void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
273   void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);
274 
275   // Subtract Logical (Register - Mem)
276   void SubU32(Register dst, const MemOperand& opnd);
277   void SubU64(Register dst, const MemOperand& opnd);
278   // Subtract Logical 32-bit
279   void SubU32(Register dst, Register src1, Register src2);
280 
281   // Multiply
282   void MulS64(Register dst, const Operand& opnd);
283   void MulS64(Register dst, Register src);
284   void MulS64(Register dst, const MemOperand& opnd);
285   void MulS64(Register dst, Register src1, Register src2) {
286     if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
287       msgrkc(dst, src1, src2);
288     } else {
289       if (dst == src2) {
290         MulS64(dst, src1);
291       } else if (dst == src1) {
292         MulS64(dst, src2);
293       } else {
294         mov(dst, src1);
295         MulS64(dst, src2);
296       }
297     }
298   }
299 
300   void MulS32(Register dst, const MemOperand& src1);
301   void MulS32(Register dst, Register src1);
302   void MulS32(Register dst, const Operand& src1);
303   void MulS32(Register dst, Register src1, Register src2) {
304     if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
305       msrkc(dst, src1, src2);
306     } else {
307       if (dst == src2) {
308         MulS32(dst, src1);
309       } else if (dst == src1) {
310         MulS32(dst, src2);
311       } else {
312         mov(dst, src1);
313         MulS32(dst, src2);
314       }
315     }
316   }
317 
318   void MulHighS32(Register dst, Register src1, const MemOperand& src2);
319   void MulHighS32(Register dst, Register src1, Register src2);
320   void MulHighS32(Register dst, Register src1, const Operand& src2);
321   void MulHighU32(Register dst, Register src1, const MemOperand& src2);
322   void MulHighU32(Register dst, Register src1, Register src2);
323   void MulHighU32(Register dst, Register src1, const Operand& src2);
324   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
325                                     const MemOperand& src2);
326   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
327   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
328                                     const Operand& src2);
329   // Divide
330   void DivS32(Register dst, Register src1, const MemOperand& src2);
331   void DivS32(Register dst, Register src1, Register src2);
332   void DivU32(Register dst, Register src1, const MemOperand& src2);
333   void DivU32(Register dst, Register src1, Register src2);
334   void DivS64(Register dst, Register src1, const MemOperand& src2);
335   void DivS64(Register dst, Register src1, Register src2);
336   void DivU64(Register dst, Register src1, const MemOperand& src2);
337   void DivU64(Register dst, Register src1, Register src2);
338 
339   // Mod
340   void ModS32(Register dst, Register src1, const MemOperand& src2);
341   void ModS32(Register dst, Register src1, Register src2);
342   void ModU32(Register dst, Register src1, const MemOperand& src2);
343   void ModU32(Register dst, Register src1, Register src2);
344   void ModS64(Register dst, Register src1, const MemOperand& src2);
345   void ModS64(Register dst, Register src1, Register src2);
346   void ModU64(Register dst, Register src1, const MemOperand& src2);
347   void ModU64(Register dst, Register src1, Register src2);
348 
349   // Square root
350   void Sqrt(DoubleRegister result, DoubleRegister input);
351   void Sqrt(DoubleRegister result, const MemOperand& input);
352 
353   // Compare
354   void CmpS32(Register src1, Register src2);
355   void CmpS64(Register src1, Register src2);
356   void CmpS32(Register dst, const Operand& opnd);
357   void CmpS64(Register dst, const Operand& opnd);
358   void CmpS32(Register dst, const MemOperand& opnd);
359   void CmpS64(Register dst, const MemOperand& opnd);
360   void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
361   void CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd);
362   // TODO(john.yan): remove this
363   template <class T>
364   void CmpP(Register src1, T src2) {
365     CmpS64(src1, src2);
366   }
367 
368   // Compare Logical
369   void CmpU32(Register src1, Register src2);
370   void CmpU64(Register src1, Register src2);
371   void CmpU32(Register src1, const Operand& opnd);
372   void CmpU64(Register src1, const Operand& opnd);
373   void CmpU32(Register dst, const MemOperand& opnd);
374   void CmpU64(Register dst, const MemOperand& opnd);
375 
376   // Load
377   void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
378   void LoadS32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
379   void LoadS32(Register dst, Register src);
380   void LoadU32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
381   void LoadU32(Register dst, Register src);
382   void LoadU16(Register dst, const MemOperand& opnd);
383   void LoadU16(Register dst, Register src);
384   void LoadS16(Register dst, Register src);
385   void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
386   void LoadS8(Register dst, const MemOperand& opnd);
387   void LoadS8(Register dst, Register src);
388   void LoadU8(Register dst, const MemOperand& opnd);
389   void LoadU8(Register dst, Register src);
390   void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
391   void LoadF64(DoubleRegister dst, const MemOperand& opnd);
392   void LoadF32(DoubleRegister dst, const MemOperand& opnd);
393   // LE Load
394   void LoadU64LE(Register dst, const MemOperand& mem,
395                  Register scratch = no_reg);
396   void LoadS32LE(Register dst, const MemOperand& opnd,
397                  Register scratch = no_reg);
398   void LoadU32LE(Register dst, const MemOperand& opnd,
399                  Register scratch = no_reg);
400   void LoadU16LE(Register dst, const MemOperand& opnd);
401   void LoadS16LE(Register dst, const MemOperand& opnd);
402   void LoadV128LE(DoubleRegister dst, const MemOperand& mem, Register scratch0,
403                   Register scratch1);
404   void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
405   void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
406   // Vector LE Load and Transform instructions.
407   void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem,
408                           Register scratch);
409   void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem,
410                           Register scratch);
411   void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem,
412                           Register scratch);
413   void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem,
414                           Register scratch);
415   void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem,
416                            Register scratch);
417   void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem,
418                            Register scratch);
419   void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem,
420                             Register scratch);
421   void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem,
422                             Register scratch);
423   void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem,
424                             Register scratch);
425   void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem,
426                             Register scratch);
427   void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
428                      Register scratch);
429   void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
430                      Register scratch);
431   void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane,
432                    Register scratch);
433   void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane,
434                     Register scratch);
435   void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane,
436                     Register scratch);
437   void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane,
438                     Register scratch);
439   void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane,
440                     Register scratch);
441   void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane,
442                      Register scratch);
443   void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane,
444                      Register scratch);
445   void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane,
446                      Register scratch);
447 
448   // Load And Test
449   void LoadAndTest32(Register dst, Register src);
450   void LoadAndTestP(Register dst, Register src);
451 
452   void LoadAndTest32(Register dst, const MemOperand& opnd);
453   void LoadAndTestP(Register dst, const MemOperand& opnd);
454 
455   // Store
456   void StoreU64(const MemOperand& mem, const Operand& opnd,
457                 Register scratch = no_reg);
458   void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
459   void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
460 
461   void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
462   void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
463   void StoreF64(DoubleRegister dst, const MemOperand& opnd);
464   void StoreF32(DoubleRegister dst, const MemOperand& opnd);
465   void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
466 
467   // Store LE
468   void StoreU64LE(Register src, const MemOperand& mem,
469                   Register scratch = no_reg);
470   void StoreU32LE(Register src, const MemOperand& mem,
471                   Register scratch = no_reg);
472 
473   void StoreU16LE(Register src, const MemOperand& mem, Register scratch = r0);
474   void StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
475   void StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
476   void StoreV128LE(Simd128Register src, const MemOperand& mem,
477                    Register scratch1, Register scratch2);
478 
479   void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
480   void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
481   void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
482   void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
483 
484   void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
485   void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
486   void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
487   void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
488 
489   void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
490                   DoubleRegister scratch);
491   void AddFloat64(DoubleRegister dst, const MemOperand& opnd,
492                   DoubleRegister scratch);
493   void SubFloat32(DoubleRegister dst, const MemOperand& opnd,
494                   DoubleRegister scratch);
495   void SubFloat64(DoubleRegister dst, const MemOperand& opnd,
496                   DoubleRegister scratch);
497   void MulFloat32(DoubleRegister dst, const MemOperand& opnd,
498                   DoubleRegister scratch);
499   void MulFloat64(DoubleRegister dst, const MemOperand& opnd,
500                   DoubleRegister scratch);
501   void DivFloat32(DoubleRegister dst, const MemOperand& opnd,
502                   DoubleRegister scratch);
503   void DivFloat64(DoubleRegister dst, const MemOperand& opnd,
504                   DoubleRegister scratch);
505   void LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
506                     DoubleRegister scratch);
507 
508   // Load On Condition
509   void LoadOnConditionP(Condition cond, Register dst, Register src);
510 
511   void LoadPositiveP(Register result, Register input);
512   void LoadPositive32(Register result, Register input);
513 
514   void Branch(Condition c, const Operand& opnd);
515   void BranchOnCount(Register r1, Label* l);
516 
517   // Shifts
518   void ShiftLeftU32(Register dst, Register src, Register val,
519                     const Operand& val2 = Operand::Zero());
520   void ShiftLeftU32(Register dst, Register src, const Operand& val);
521   void ShiftLeftU64(Register dst, Register src, Register val,
522                     const Operand& val2 = Operand::Zero());
523   void ShiftLeftU64(Register dst, Register src, const Operand& val);
524   void ShiftRightU32(Register dst, Register src, Register val,
525                      const Operand& val2 = Operand::Zero());
526   void ShiftRightU32(Register dst, Register src, const Operand& val);
527   void ShiftRightU64(Register dst, Register src, Register val,
528                      const Operand& val2 = Operand::Zero());
529   void ShiftRightU64(Register dst, Register src, const Operand& val);
530   void ShiftRightS32(Register dst, Register src, Register shift,
531                      const Operand& val2 = Operand::Zero());
532   void ShiftRightS32(Register dst, Register src, const Operand& val);
533   void ShiftRightS64(Register dst, Register src, Register shift,
534                      const Operand& val2 = Operand::Zero());
535   void ShiftRightS64(Register dst, Register src, const Operand& val);
536 
537   void ClearRightImm(Register dst, Register src, const Operand& val);
538 
539   // Bitwise operations
540   void And(Register dst, Register src);
541   void AndP(Register dst, Register src);
542   void And(Register dst, Register src1, Register src2);
543   void AndP(Register dst, Register src1, Register src2);
544   void And(Register dst, const MemOperand& opnd);
545   void AndP(Register dst, const MemOperand& opnd);
546   void And(Register dst, const Operand& opnd);
547   void AndP(Register dst, const Operand& opnd);
548   void And(Register dst, Register src, const Operand& opnd);
549   void AndP(Register dst, Register src, const Operand& opnd);
550   void Or(Register dst, Register src);
551   void OrP(Register dst, Register src);
552   void Or(Register dst, Register src1, Register src2);
553   void OrP(Register dst, Register src1, Register src2);
554   void Or(Register dst, const MemOperand& opnd);
555   void OrP(Register dst, const MemOperand& opnd);
556   void Or(Register dst, const Operand& opnd);
557   void OrP(Register dst, const Operand& opnd);
558   void Or(Register dst, Register src, const Operand& opnd);
559   void OrP(Register dst, Register src, const Operand& opnd);
560   void Xor(Register dst, Register src);
561   void XorP(Register dst, Register src);
562   void Xor(Register dst, Register src1, Register src2);
563   void XorP(Register dst, Register src1, Register src2);
564   void Xor(Register dst, const MemOperand& opnd);
565   void XorP(Register dst, const MemOperand& opnd);
566   void Xor(Register dst, const Operand& opnd);
567   void XorP(Register dst, const Operand& opnd);
568   void Xor(Register dst, Register src, const Operand& opnd);
569   void XorP(Register dst, Register src, const Operand& opnd);
570   void Popcnt32(Register dst, Register src);
571   void Not32(Register dst, Register src = no_reg);
572   void Not64(Register dst, Register src = no_reg);
573   void NotP(Register dst, Register src = no_reg);
574 
575 #ifdef V8_TARGET_ARCH_S390X
576   void Popcnt64(Register dst, Register src);
577 #endif
578 
579   void mov(Register dst, const Operand& src);
580   void mov(Register dst, Register src);
581 
582   void CleanUInt32(Register x) {
583 #ifdef V8_TARGET_ARCH_S390X
584     llgfr(x, x);
585 #endif
586   }
587 
588   void push(DoubleRegister src) {
589     lay(sp, MemOperand(sp, -kSystemPointerSize));
590     StoreF64(src, MemOperand(sp));
591   }
592 
593   void push(Register src) {
594     lay(sp, MemOperand(sp, -kSystemPointerSize));
595     StoreU64(src, MemOperand(sp));
596   }
597 
598   void pop(DoubleRegister dst) {
599     LoadF64(dst, MemOperand(sp));
600     la(sp, MemOperand(sp, kSystemPointerSize));
601   }
602 
603   void pop(Register dst) {
604     LoadU64(dst, MemOperand(sp));
605     la(sp, MemOperand(sp, kSystemPointerSize));
606   }
607 
608   void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); }
609 
610   void Push(Register src) { push(src); }
611 
612   // Push a handle.
613   void Push(Handle<HeapObject> handle);
614   void Push(Smi smi);
615 
616   // Push two registers.  Pushes leftmost register first (to highest address).
617   void Push(Register src1, Register src2) {
618     lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
619     StoreU64(src1, MemOperand(sp, kSystemPointerSize));
620     StoreU64(src2, MemOperand(sp, 0));
621   }
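  // Resulting layout sketch: after Push(a, b), MemOperand(sp, kSystemPointerSize)
  // holds a and MemOperand(sp, 0) holds b, i.e. the leftmost register ends up
  // at the higher address.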
622 
623   // Push three registers.  Pushes leftmost register first (to highest address).
624   void Push(Register src1, Register src2, Register src3) {
625     lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
626     StoreU64(src1, MemOperand(sp, kSystemPointerSize * 2));
627     StoreU64(src2, MemOperand(sp, kSystemPointerSize));
628     StoreU64(src3, MemOperand(sp, 0));
629   }
630 
631   // Push four registers.  Pushes leftmost register first (to highest address).
632   void Push(Register src1, Register src2, Register src3, Register src4) {
633     lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
634     StoreU64(src1, MemOperand(sp, kSystemPointerSize * 3));
635     StoreU64(src2, MemOperand(sp, kSystemPointerSize * 2));
636     StoreU64(src3, MemOperand(sp, kSystemPointerSize));
637     StoreU64(src4, MemOperand(sp, 0));
638   }
639 
640   // Push five registers.  Pushes leftmost register first (to highest address).
641   void Push(Register src1, Register src2, Register src3, Register src4,
642             Register src5) {
643     DCHECK(src1 != src2);
644     DCHECK(src1 != src3);
645     DCHECK(src2 != src3);
646     DCHECK(src1 != src4);
647     DCHECK(src2 != src4);
648     DCHECK(src3 != src4);
649     DCHECK(src1 != src5);
650     DCHECK(src2 != src5);
651     DCHECK(src3 != src5);
652     DCHECK(src4 != src5);
653 
654     lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
655     StoreU64(src1, MemOperand(sp, kSystemPointerSize * 4));
656     StoreU64(src2, MemOperand(sp, kSystemPointerSize * 3));
657     StoreU64(src3, MemOperand(sp, kSystemPointerSize * 2));
658     StoreU64(src4, MemOperand(sp, kSystemPointerSize));
659     StoreU64(src5, MemOperand(sp, 0));
660   }
661 
662   enum PushArrayOrder { kNormal, kReverse };
663   void PushArray(Register array, Register size, Register scratch,
664                  Register scratch2, PushArrayOrder order = kNormal);
665 
666   void Pop(Register dst) { pop(dst); }
667 
668   // Pop two registers. Pops rightmost register first (from lower address).
669   void Pop(Register src1, Register src2) {
670     LoadU64(src2, MemOperand(sp, 0));
671     LoadU64(src1, MemOperand(sp, kSystemPointerSize));
672     la(sp, MemOperand(sp, 2 * kSystemPointerSize));
673   }
674 
675   // Pop three registers.  Pops rightmost register first (from lower address).
676   void Pop(Register src1, Register src2, Register src3) {
677     LoadU64(src3, MemOperand(sp, 0));
678     LoadU64(src2, MemOperand(sp, kSystemPointerSize));
679     LoadU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
680     la(sp, MemOperand(sp, 3 * kSystemPointerSize));
681   }
682 
683   // Pop four registers.  Pops rightmost register first (from lower address).
684   void Pop(Register src1, Register src2, Register src3, Register src4) {
685     LoadU64(src4, MemOperand(sp, 0));
686     LoadU64(src3, MemOperand(sp, kSystemPointerSize));
687     LoadU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
688     LoadU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
689     la(sp, MemOperand(sp, 4 * kSystemPointerSize));
690   }
691 
692   // Pop five registers.  Pops rightmost register first (from lower address).
693   void Pop(Register src1, Register src2, Register src3, Register src4,
694            Register src5) {
695     LoadU64(src5, MemOperand(sp, 0));
696     LoadU64(src4, MemOperand(sp, kSystemPointerSize));
697     LoadU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
698     LoadU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
699     LoadU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
700     la(sp, MemOperand(sp, 5 * kSystemPointerSize));
701   }
702 
703   // Push a fixed frame, consisting of lr, fp, constant pool.
704   void PushCommonFrame(Register marker_reg = no_reg);
705 
706   // Push a standard frame, consisting of lr, fp, constant pool,
707   // context and JS function
708   void PushStandardFrame(Register function_reg);
709 
710   void PopCommonFrame(Register marker_reg = no_reg);
711 
712   // Restore caller's frame pointer and return address prior to being
713   // overwritten by tail call stack preparation.
714   void RestoreFrameStateForTailCall();
715 
716   void InitializeRootRegister() {
717     ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
718     mov(kRootRegister, Operand(isolate_root));
719   }
720 
721   // If the value is a NaN, canonicalize the value; otherwise, do nothing.
722   void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
723   void CanonicalizeNaN(const DoubleRegister value) {
724     CanonicalizeNaN(value, value);
725   }
726 
727   // Converts the integer (untagged smi) in |src| to a double, storing
728   // the result to |dst|
729   void ConvertIntToDouble(DoubleRegister dst, Register src);
730 
731   // Converts the unsigned integer (untagged smi) in |src| to
732   // a double, storing the result to |dst|
733   void ConvertUnsignedIntToDouble(DoubleRegister dst, Register src);
734 
735   // Converts the integer (untagged smi) in |src| to
736   // a float, storing the result in |dst|
737   void ConvertIntToFloat(DoubleRegister dst, Register src);
738 
739   // Converts the unsigned integer (untagged smi) in |src| to
740   // a float, storing the result in |dst|
741   void ConvertUnsignedIntToFloat(DoubleRegister dst, Register src);
742 
743   void ConvertInt64ToFloat(DoubleRegister double_dst, Register src);
744   void ConvertInt64ToDouble(DoubleRegister double_dst, Register src);
745   void ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, Register src);
746   void ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, Register src);
747 
748   void MovIntToFloat(DoubleRegister dst, Register src);
749   void MovFloatToInt(Register dst, DoubleRegister src);
750   void MovDoubleToInt64(Register dst, DoubleRegister src);
751   void MovInt64ToDouble(DoubleRegister dst, Register src);
752   // Converts the double_input to an integer.  Note that, upon return,
753   // the contents of double_dst will also hold the fixed point representation.
754   void ConvertFloat32ToInt64(const Register dst,
755                              const DoubleRegister double_input,
756                              FPRoundingMode rounding_mode = kRoundToZero);
757 
758   // Converts the double_input to an integer.  Note that, upon return,
759   // the contents of double_dst will also hold the fixed point representation.
760   void ConvertDoubleToInt64(const Register dst,
761                             const DoubleRegister double_input,
762                             FPRoundingMode rounding_mode = kRoundToZero);
763   void ConvertDoubleToInt32(const Register dst,
764                             const DoubleRegister double_input,
765                             FPRoundingMode rounding_mode = kRoundToZero);
766 
767   void ConvertFloat32ToInt32(const Register result,
768                              const DoubleRegister double_input,
769                              FPRoundingMode rounding_mode);
770   void ConvertFloat32ToUnsignedInt32(
771       const Register result, const DoubleRegister double_input,
772       FPRoundingMode rounding_mode = kRoundToZero);
773   // Converts the double_input to an unsigned integer.  Note that, upon return,
774   // the contents of double_dst will also hold the fixed point representation.
775   void ConvertDoubleToUnsignedInt64(
776       const Register dst, const DoubleRegister double_input,
777       FPRoundingMode rounding_mode = kRoundToZero);
778   void ConvertDoubleToUnsignedInt32(
779       const Register dst, const DoubleRegister double_input,
780       FPRoundingMode rounding_mode = kRoundToZero);
781   void ConvertFloat32ToUnsignedInt64(
782       const Register result, const DoubleRegister double_input,
783       FPRoundingMode rounding_mode = kRoundToZero);
784 
785   // Generates function and stub prologue code.
786   void StubPrologue(StackFrame::Type type, Register base = no_reg,
787                     int prologue_offset = 0);
788   void Prologue(Register base, int prologue_offset = 0);
789 
790   enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
791   enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
792   void DropArguments(Register count, ArgumentsCountType type,
793                      ArgumentsCountMode mode);
794   void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
795                                        ArgumentsCountType type,
796                                        ArgumentsCountMode mode);
797 
798   // Get the actual activation frame alignment for target environment.
799   static int ActivationFrameAlignment();
800   // ----------------------------------------------------------------
801   // new S390 macro-assembler interfaces that are slightly higher level
802   // than assembler-s390 and may generate variable length sequences
803 
804   // load an SMI value <value> to GPR <dst>
805   void LoadSmiLiteral(Register dst, Smi smi);
806 
807   // load a literal double value <value> to FPR <result>
808   template <class T>
809   void LoadF64(DoubleRegister result, T value, Register scratch) {
810     static_assert(sizeof(T) == kDoubleSize, "Expect input size to be 8");
811     uint64_t int_val = bit_cast<uint64_t, T>(value);
812     // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
813     uint32_t hi_32 = int_val >> 32;
814     uint32_t lo_32 = static_cast<uint32_t>(int_val);
815 
816     if (int_val == 0) {
817       lzdr(result);
818     } else if (lo_32 == 0) {
819       llihf(scratch, Operand(hi_32));
820       ldgr(result, scratch);
821     } else {
822       iihf(scratch, Operand(hi_32));
823       iilf(scratch, Operand(lo_32));
824       ldgr(result, scratch);
825     }
826   }
827 
828   template <class T>
829   void LoadF32(DoubleRegister result, T value, Register scratch) {
830     static_assert(sizeof(T) == kFloatSize, "Expect input size to be 4");
831     uint32_t int_val = bit_cast<uint32_t, T>(value);
832     LoadF64(result, static_cast<uint64_t>(int_val) << 32, scratch);
833   }
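  // Example sketch (assumes d0 and r1 are free at the call site): LoadF64 can
  // materialize a double literal by building its bit pattern in a GPR and
  // transferring it with ldgr, e.g.
  //   __ LoadF64(d0, 1.5, r1);   // 1.5 == 0x3FF8000000000000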
834 
835   void CmpSmiLiteral(Register src1, Smi smi, Register scratch);
836 
837   // Set new rounding mode RN in the FPC (floating point control) register
838   void SetRoundingMode(FPRoundingMode RN);
839 
840   // reset rounding mode to default (kRoundToNearest)
841   void ResetRoundingMode();
842 
843   // These exist to provide portability between 32 and 64-bit
844   void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
845   void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
846   void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
847   void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
848 
849   void SwapP(Register src, Register dst, Register scratch);
850   void SwapP(Register src, MemOperand dst, Register scratch);
851   void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
852              Register scratch_1);
853   void SwapFloat32(DoubleRegister src, DoubleRegister dst,
854                    DoubleRegister scratch);
855   void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
856   void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch);
857   void SwapDouble(DoubleRegister src, DoubleRegister dst,
858                   DoubleRegister scratch);
859   void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
860   void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch);
861   void SwapSimd128(Simd128Register src, Simd128Register dst,
862                    Simd128Register scratch);
863   void SwapSimd128(Simd128Register src, MemOperand dst,
864                    Simd128Register scratch);
865   void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
866 
867   // Cleanse a pointer address on 31-bit by zeroing out the top bit.
868   // This is a NOP on 64-bit.
869   void CleanseP(Register src) {
870 #if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
871     nilh(src, Operand(0x7FFF));
872 #endif
873   }
874 
875   // ---------------------------------------------------------------------------
876   // Runtime calls
877 
878   // Before calling a C-function from generated code, align arguments on stack.
879   // After aligning the frame, non-register arguments must be stored in
880   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
881   // are word sized. If double arguments are used, this function assumes that
882   // all double arguments are stored before core registers; otherwise the
883   // correct alignment of the double values is not guaranteed.
884   // Some compilers/platforms require the stack to be aligned when calling
885   // C++ code.
886   // Needs a scratch register to do some arithmetic. This register will be
887   // trashed.
888   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
889                             Register scratch);
890   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
891 
892   // There are two ways of passing double arguments, depending on whether a
893   // soft or hard floating point ABI is used. These functions abstract
894   // parameter passing for the different ways we call C functions from
895   // generated code.
896   void MovToFloatParameter(DoubleRegister src);
897   void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
898   void MovToFloatResult(DoubleRegister src);
899 
900   // Calls a C function and cleans up the space for arguments allocated
901   // by PrepareCallCFunction. The called function is not allowed to trigger a
902   // garbage collection, since that might move the code and invalidate the
903   // return address (unless this is somehow accounted for by the called
904   // function).
905   void CallCFunction(ExternalReference function, int num_arguments);
906   void CallCFunction(Register function, int num_arguments);
907   void CallCFunction(ExternalReference function, int num_reg_arguments,
908                      int num_double_arguments);
909   void CallCFunction(Register function, int num_reg_arguments,
910                      int num_double_arguments);
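  // Call sequence sketch (illustrative only; the argument registers follow the
  // s390x C ABI, and "ref" stands for an ExternalReference to the target C
  // function, which is an assumption rather than anything this header defines):
  //   __ PrepareCallCFunction(2, r1);   // 2 GPR args, r1 used as scratch
  //   __ Move(r2, arg0);                // first C argument
  //   __ Move(r3, arg1);                // second C argument
  //   __ CallCFunction(ref, 2);         // call and undo what Prepare set up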
911 
912   void MovFromFloatParameter(DoubleRegister dst);
913   void MovFromFloatResult(DoubleRegister dst);
914 
915   void Trap();
916   void DebugBreak();
917 
918   // Performs a truncating conversion of a floating point number as used by
919   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
920   void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
921                          DoubleRegister double_input, StubCallMode stub_mode);
922   void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
923                                   Label* done);
924 
925   // ---------------------------------------------------------------------------
926   // Debugging
927 
928   // Calls Abort(msg) if the condition cond is not satisfied.
929   // Use --debug-code to enable.
930   void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
931 
932   // Like Assert(), but without condition.
933   // Use --debug-code to enable.
934   void AssertUnreachable(AbortReason reason);
935 
936   // Like Assert(), but always enabled.
937   void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
938 
939   // Print a message to stdout and abort execution.
940   void Abort(AbortReason reason);
941 
942   // ---------------------------------------------------------------------------
943   // Bit testing/extraction
944   //
945   // Bit numbering is such that the least significant bit is bit 0
946   // (for consistency between 32/64-bit).
947 
948   // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
949   // and place them into the least significant bits of dst.
950   inline void ExtractBitRange(Register dst, Register src, int rangeStart,
951                               int rangeEnd) {
952     DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
953 
954     // Try to use RISBG if possible.
955     if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
956       int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
957       int endBit = 63;  // End is always LSB after shifting.
958       int startBit = 63 - rangeStart + rangeEnd;
959       RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
960                              Operand(shiftAmount), true);
961     } else {
962       if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
963         ShiftRightU64(dst, src, Operand(rangeEnd));
964       else if (dst != src)  // If we didn't shift, we might need to copy
965         mov(dst, src);
966       int width = rangeStart - rangeEnd + 1;
967 #if V8_TARGET_ARCH_S390X
968       uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
969       nihf(dst, Operand(mask >> 32));
970       nilf(dst, Operand(mask & 0xFFFFFFFF));
971       ltgr(dst, dst);
972 #else
973       uint32_t mask = (1 << width) - 1;
974       AndP(dst, Operand(mask));
975 #endif
976     }
977   }
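  // Example (illustrative): ExtractBitRange(r2, r3, 7, 4) leaves
  // (r3 >> 4) & 0xF in r2, using a single RISBG when GENERAL_INSTR_EXT is
  // available and a shift-and-mask sequence otherwise.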
978 
979   inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
980     ExtractBitRange(dst, src, bitNumber, bitNumber);
981   }
982 
983   // Extract consecutive bits (defined by mask) from src and place them
984   // into the least significant bits of dst.
985   inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
986                              RCBit rc = LeaveRC) {
987     int start = kBitsPerSystemPointer - 1;
988     int end;
989     uintptr_t bit = (1L << start);
990 
991     while (bit && (mask & bit) == 0) {
992       start--;
993       bit >>= 1;
994     }
995     end = start;
996     bit >>= 1;
997 
998     while (bit && (mask & bit)) {
999       end--;
1000       bit >>= 1;
1001     }
1002 
1003     // 1-bits in mask must be contiguous
1004     DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1005 
1006     ExtractBitRange(dst, src, start, end);
1007   }
1008 
1009   // Test single bit in value.
1010   inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1011     ExtractBitRange(scratch, value, bitNumber, bitNumber);
1012   }
1013 
1014   // Test consecutive bit range in value.  Range is defined by
1015   // rangeStart - rangeEnd.
1016   inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1017                            Register scratch = r0) {
1018     ExtractBitRange(scratch, value, rangeStart, rangeEnd);
1019   }
1020 
1021   // Test consecutive bit range in value.  Range is defined by mask.
1022   inline void TestBitMask(Register value, uintptr_t mask,
1023                           Register scratch = r0) {
1024     ExtractBitMask(scratch, value, mask, SetRC);
1025   }
1026   inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
1027 
1028   inline void TestIfSmi(MemOperand value) {
1029     if (is_uint12(value.offset())) {
1030       tm(value, Operand(1));
1031     } else if (is_int20(value.offset())) {
1032       tmy(value, Operand(1));
1033     } else {
1034       LoadS8(r0, value);
1035       tmll(r0, Operand(1));
1036     }
1037   }
1038 
1039   inline void TestIfInt32(Register value) {
1040     // High bits must be identical to fit into a 32-bit integer
1041     cgfr(value, value);
1042   }
1043   void SmiUntag(Register reg) { SmiUntag(reg, reg); }
1044 
1045   void SmiUntag(Register dst, const MemOperand& src);
1046   void SmiUntag(Register dst, Register src) {
1047     if (SmiValuesAre31Bits()) {
1048       ShiftRightS32(dst, src, Operand(kSmiShift));
1049     } else {
1050       ShiftRightS64(dst, src, Operand(kSmiShift));
1051     }
1052     lgfr(dst, dst);
1053   }
1054   void SmiToInt32(Register smi) {
1055     if (FLAG_enable_slow_asserts) {
1056       AssertSmi(smi);
1057     }
1058     DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1059     SmiUntag(smi);
1060   }
1061 
1062   // Shift left by kSmiShift
1063   void SmiTag(Register reg) { SmiTag(reg, reg); }
1064   void SmiTag(Register dst, Register src) {
1065     ShiftLeftU64(dst, src, Operand(kSmiShift));
1066   }
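  // Tagging sketch (assumes 31-bit smis, where kSmiShift == 1): SmiTag turns
  // the integer 5 into the tagged value 10, SmiUntag reverses it, and
  // TestIfSmi above checks the low (tag) bit via tmll.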
1067 
1068   // Abort execution if argument is a smi, enabled via --debug-code.
1069   void AssertNotSmi(Register object);
1070   void AssertSmi(Register object);
1071 
1072   // Activation support.
1073   void EnterFrame(StackFrame::Type type,
1074                   bool load_constant_pool_pointer_reg = false);
1075   // Returns the pc offset at which the frame ends.
1076   int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1077 
1078   void AllocateStackSpace(int bytes) {
1079     DCHECK_GE(bytes, 0);
1080     if (bytes == 0) return;
1081     lay(sp, MemOperand(sp, -bytes));
1082   }
1083 
1084   void AllocateStackSpace(Register bytes) { SubS64(sp, sp, bytes); }
1085 
1086   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
1087                      Label* condition_met);
1088 
1089   void ComputeCodeStartAddress(Register dst);
1090   void LoadPC(Register dst);
1091 
1092   // Control-flow integrity:
1093 
1094   // Define a function entrypoint. This doesn't emit any code for this
1095   // architecture, as control-flow integrity is not supported for it.
1096   void CodeEntry() {}
1097   // Define an exception handler.
1098   void ExceptionHandler() {}
1099   // Define an exception handler and bind a label.
1100   void BindExceptionHandler(Label* label) { bind(label); }
1101 
1102   // Generates an instruction sequence s.t. the return address points to the
1103   // instruction following the call.
1104   // The return address on the stack is used by frame iteration.
1105   void StoreReturnAddressAndCall(Register target);
1106 
1107   // ---------------------------------------------------------------------------
1108   // Simd Support.
1109   void F64x2Splat(Simd128Register dst, Simd128Register src);
1110   void F32x4Splat(Simd128Register dst, Simd128Register src);
1111   void I64x2Splat(Simd128Register dst, Register src);
1112   void I32x4Splat(Simd128Register dst, Register src);
1113   void I16x8Splat(Simd128Register dst, Register src);
1114   void I8x16Splat(Simd128Register dst, Register src);
1115   void F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
1116                         uint8_t imm_lane_idx, Register = r0);
1117   void F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
1118                         uint8_t imm_lane_idx, Register = r0);
1119   void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
1120                         Register = r0);
1121   void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
1122                         Register = r0);
1123   void I16x8ExtractLaneU(Register dst, Simd128Register src,
1124                          uint8_t imm_lane_idx, Register = r0);
1125   void I16x8ExtractLaneS(Register dst, Simd128Register src,
1126                          uint8_t imm_lane_idx, Register scratch);
1127   void I8x16ExtractLaneU(Register dst, Simd128Register src,
1128                          uint8_t imm_lane_idx, Register = r0);
1129   void I8x16ExtractLaneS(Register dst, Simd128Register src,
1130                          uint8_t imm_lane_idx, Register scratch);
1131   void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
1132                         DoubleRegister src2, uint8_t imm_lane_idx,
1133                         Register scratch);
1134   void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
1135                         DoubleRegister src2, uint8_t imm_lane_idx,
1136                         Register scratch);
1137   void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
1138                         Register src2, uint8_t imm_lane_idx, Register = r0);
1139   void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
1140                         Register src2, uint8_t imm_lane_idx, Register = r0);
1141   void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
1142                         Register src2, uint8_t imm_lane_idx, Register = r0);
1143   void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
1144                         Register src2, uint8_t imm_lane_idx, Register = r0);
1145   void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2,
1146                 Register scratch1, Register scratch2, Register scratch3);
1147   void I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
1148                 Simd128Register scratch);
1149   void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
1150                 Simd128Register scratch);
1151   void I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2,
1152                 Simd128Register scratch);
1153   void I64x2BitMask(Register dst, Simd128Register src, Register scratch1,
1154                     Simd128Register scratch2);
1155   void I32x4BitMask(Register dst, Simd128Register src, Register scratch1,
1156                     Simd128Register scratch2);
1157   void I16x8BitMask(Register dst, Simd128Register src, Register scratch1,
1158                     Simd128Register scratch2);
1159   void I8x16BitMask(Register dst, Simd128Register src, Register scratch1,
1160                     Register scratch2, Simd128Register scratch3);
1161   void V128AnyTrue(Register dst, Simd128Register src, Register scratch);
1162   void I32x4SConvertF32x4(Simd128Register dst, Simd128Register src,
1163                           Simd128Register scratch1, Register scratch2);
1164   void I32x4UConvertF32x4(Simd128Register dst, Simd128Register src,
1165                           Simd128Register scratch1, Register scratch2);
1166   void F32x4SConvertI32x4(Simd128Register dst, Simd128Register src,
1167                           Simd128Register scratch1, Register scratch2);
1168   void F32x4UConvertI32x4(Simd128Register dst, Simd128Register src,
1169                           Simd128Register scratch1, Register scratch2);
1170   void I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1,
1171                           Simd128Register src2);
1172   void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1,
1173                           Simd128Register src2);
1174   void I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1,
1175                           Simd128Register src2, Simd128Register scratch);
1176   void I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1,
1177                           Simd128Register src2, Simd128Register scratch);
1178   void F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src,
1179                             Simd128Register scratch1, Register scratch2,
1180                             Register scratch3, Register scratch4);
1181   void F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src,
1182                             Simd128Register scratch1, Register scratch2,
1183                             Register scratch3, Register scratch4);
1184   void I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src,
1185                                Simd128Register scratch);
1186   void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src,
1187                                Simd128Register scratch);
1188   void I8x16Swizzle(Simd128Register dst, Simd128Register src1,
1189                     Simd128Register src2, Register scratch1, Register scratch2,
1190                     Simd128Register scratch3, Simd128Register scratch4);
1191   void S128Const(Simd128Register dst, uint64_t high, uint64_t low,
1192                  Register scratch1, Register scratch2);
1193   void I8x16Shuffle(Simd128Register dst, Simd128Register src1,
1194                     Simd128Register src2, uint64_t high, uint64_t low,
1195                     Register scratch1, Register scratch2,
1196                     Simd128Register scratch3);
1197   void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
1198                       Simd128Register src2, Simd128Register scratch);
1199   void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
1200                         Simd128Register src2, Simd128Register scratch1,
1201                         Simd128Register scratch2, Simd128Register scratch3);
1202   void S128Select(Simd128Register dst, Simd128Register src1,
1203                   Simd128Register src2, Simd128Register mask);
1204 
1205 #define SIMD_SHIFT_LIST(V) \
1206   V(I64x2Shl)              \
1207   V(I64x2ShrS)             \
1208   V(I64x2ShrU)             \
1209   V(I32x4Shl)              \
1210   V(I32x4ShrS)             \
1211   V(I32x4ShrU)             \
1212   V(I16x8Shl)              \
1213   V(I16x8ShrS)             \
1214   V(I16x8ShrU)             \
1215   V(I8x16Shl)              \
1216   V(I8x16ShrS)             \
1217   V(I8x16ShrU)
1218 
1219 #define PROTOTYPE_SIMD_SHIFT(name)                                          \
1220   void name(Simd128Register dst, Simd128Register src1, Register src2,       \
1221             Simd128Register scratch);                                       \
1222   void name(Simd128Register dst, Simd128Register src1, const Operand& src2, \
1223             Register scratch1, Simd128Register scratch2);
1224   SIMD_SHIFT_LIST(PROTOTYPE_SIMD_SHIFT)
1225 #undef PROTOTYPE_SIMD_SHIFT
1226 #undef SIMD_SHIFT_LIST
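  // For reference, each SIMD_SHIFT_LIST entry above expands via
  // PROTOTYPE_SIMD_SHIFT into two overloads, e.g. for I32x4Shl:
  //   void I32x4Shl(Simd128Register dst, Simd128Register src1, Register src2,
  //                 Simd128Register scratch);
  //   void I32x4Shl(Simd128Register dst, Simd128Register src1,
  //                 const Operand& src2, Register scratch1,
  //                 Simd128Register scratch2);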
1227 
1228 #define SIMD_UNOP_LIST(V)   \
1229   V(F64x2Abs)               \
1230   V(F64x2Neg)               \
1231   V(F64x2Sqrt)              \
1232   V(F64x2Ceil)              \
1233   V(F64x2Floor)             \
1234   V(F64x2Trunc)             \
1235   V(F64x2NearestInt)        \
1236   V(F64x2ConvertLowI32x4S)  \
1237   V(F64x2ConvertLowI32x4U)  \
1238   V(F32x4Abs)               \
1239   V(F32x4Neg)               \
1240   V(F32x4Sqrt)              \
1241   V(F32x4Ceil)              \
1242   V(F32x4Floor)             \
1243   V(F32x4Trunc)             \
1244   V(F32x4NearestInt)        \
1245   V(I64x2Abs)               \
1246   V(I64x2SConvertI32x4Low)  \
1247   V(I64x2SConvertI32x4High) \
1248   V(I64x2UConvertI32x4Low)  \
1249   V(I64x2UConvertI32x4High) \
1250   V(I64x2Neg)               \
1251   V(I32x4Abs)               \
1252   V(I32x4Neg)               \
1253   V(I32x4SConvertI16x8Low)  \
1254   V(I32x4SConvertI16x8High) \
1255   V(I32x4UConvertI16x8Low)  \
1256   V(I32x4UConvertI16x8High) \
1257   V(I16x8Abs)               \
1258   V(I16x8Neg)               \
1259   V(I16x8SConvertI8x16Low)  \
1260   V(I16x8SConvertI8x16High) \
1261   V(I16x8UConvertI8x16Low)  \
1262   V(I16x8UConvertI8x16High) \
1263   V(I8x16Abs)               \
1264   V(I8x16Neg)               \
1265   V(I8x16Popcnt)            \
1266   V(S128Not)                \
1267   V(S128Zero)               \
1268   V(S128AllOnes)
1269 
1270 #define PROTOTYPE_SIMD_UNOP(name) \
1271   void name(Simd128Register dst, Simd128Register src);
1272   SIMD_UNOP_LIST(PROTOTYPE_SIMD_UNOP)
1273 #undef PROTOTYPE_SIMD_UNOP
1274 #undef SIMD_UNOP_LIST
1275 
1276 #define SIMD_BINOP_LIST(V) \
1277   V(F64x2Add)              \
1278   V(F64x2Sub)              \
1279   V(F64x2Mul)              \
1280   V(F64x2Div)              \
1281   V(F64x2Min)              \
1282   V(F64x2Max)              \
1283   V(F64x2Eq)               \
1284   V(F64x2Ne)               \
1285   V(F64x2Lt)               \
1286   V(F64x2Le)               \
1287   V(F64x2Pmin)             \
1288   V(F64x2Pmax)             \
1289   V(F32x4Add)              \
1290   V(F32x4Sub)              \
1291   V(F32x4Mul)              \
1292   V(F32x4Div)              \
1293   V(F32x4Min)              \
1294   V(F32x4Max)              \
1295   V(F32x4Eq)               \
1296   V(F32x4Ne)               \
1297   V(F32x4Lt)               \
1298   V(F32x4Le)               \
1299   V(F32x4Pmin)             \
1300   V(F32x4Pmax)             \
1301   V(I64x2Add)              \
1302   V(I64x2Sub)              \
1303   V(I64x2Eq)               \
1304   V(I64x2Ne)               \
1305   V(I64x2GtS)              \
1306   V(I64x2GeS)              \
1307   V(I32x4Add)              \
1308   V(I32x4Sub)              \
1309   V(I32x4Mul)              \
1310   V(I32x4Eq)               \
1311   V(I32x4Ne)               \
1312   V(I32x4GtS)              \
1313   V(I32x4GeS)              \
1314   V(I32x4GtU)              \
1315   V(I32x4MinS)             \
1316   V(I32x4MinU)             \
1317   V(I32x4MaxS)             \
1318   V(I32x4MaxU)             \
1319   V(I16x8Add)              \
1320   V(I16x8Sub)              \
1321   V(I16x8Mul)              \
1322   V(I16x8Eq)               \
1323   V(I16x8Ne)               \
1324   V(I16x8GtS)              \
1325   V(I16x8GeS)              \
1326   V(I16x8GtU)              \
1327   V(I16x8MinS)             \
1328   V(I16x8MinU)             \
1329   V(I16x8MaxS)             \
1330   V(I16x8MaxU)             \
1331   V(I16x8RoundingAverageU) \
1332   V(I8x16Add)              \
1333   V(I8x16Sub)              \
1334   V(I8x16Eq)               \
1335   V(I8x16Ne)               \
1336   V(I8x16GtS)              \
1337   V(I8x16GeS)              \
1338   V(I8x16GtU)              \
1339   V(I8x16MinS)             \
1340   V(I8x16MinU)             \
1341   V(I8x16MaxS)             \
1342   V(I8x16MaxU)             \
1343   V(I8x16RoundingAverageU) \
1344   V(S128And)               \
1345   V(S128Or)                \
1346   V(S128Xor)               \
1347   V(S128AndNot)
1348 
1349 #define PROTOTYPE_SIMD_BINOP(name) \
1350   void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
1351   SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
1352 #undef PROTOTYPE_SIMD_BINOP
1353 #undef SIMD_BINOP_LIST
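  // For reference, each SIMD_BINOP_LIST entry above expands via
  // PROTOTYPE_SIMD_BINOP into a single three-operand prototype, e.g.:
  //   void F64x2Add(Simd128Register dst, Simd128Register src1,
  //                 Simd128Register src2);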
1354 
1355 #define SIMD_EXT_MUL_LIST(V) \
1356   V(I64x2ExtMulLowI32x4S)    \
1357   V(I64x2ExtMulHighI32x4S)   \
1358   V(I64x2ExtMulLowI32x4U)    \
1359   V(I64x2ExtMulHighI32x4U)   \
1360   V(I32x4ExtMulLowI16x8S)    \
1361   V(I32x4ExtMulHighI16x8S)   \
1362   V(I32x4ExtMulLowI16x8U)    \
1363   V(I32x4ExtMulHighI16x8U)   \
1364   V(I16x8ExtMulLowI8x16S)    \
1365   V(I16x8ExtMulHighI8x16S)   \
1366   V(I16x8ExtMulLowI8x16U)    \
1367   V(I16x8ExtMulHighI8x16U)
1368 
1369 #define PROTOTYPE_SIMD_EXT_MUL(name)                                         \
1370   void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1371             Simd128Register scratch);
1372   SIMD_EXT_MUL_LIST(PROTOTYPE_SIMD_EXT_MUL)
1373 #undef PROTOTYPE_SIMD_EXT_MUL
1374 #undef SIMD_EXT_MUL_LIST
1375 
1376 #define SIMD_ALL_TRUE_LIST(V) \
1377   V(I64x2AllTrue)             \
1378   V(I32x4AllTrue)             \
1379   V(I16x8AllTrue)             \
1380   V(I8x16AllTrue)
1381 
1382 #define PROTOTYPE_SIMD_ALL_TRUE(name)                             \
1383   void name(Register dst, Simd128Register src, Register scratch1, \
1384             Simd128Register scratch2);
1385   SIMD_ALL_TRUE_LIST(PROTOTYPE_SIMD_ALL_TRUE)
1386 #undef PROTOTYPE_SIMD_ALL_TRUE
1387 #undef SIMD_ALL_TRUE_LIST
1388 
1389 #define SIMD_QFM_LIST(V) \
1390   V(F64x2Qfma)           \
1391   V(F64x2Qfms)           \
1392   V(F32x4Qfma)           \
1393   V(F32x4Qfms)
1394 
1395 #define PROTOTYPE_SIMD_QFM(name)                                             \
1396   void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1397             Simd128Register src3);
1398   SIMD_QFM_LIST(PROTOTYPE_SIMD_QFM)
1399 #undef PROTOTYPE_SIMD_QFM
1400 #undef SIMD_QFM_LIST
1401 
1402 #define SIMD_ADD_SUB_SAT_LIST(V) \
1403   V(I16x8AddSatS)                \
1404   V(I16x8SubSatS)                \
1405   V(I16x8AddSatU)                \
1406   V(I16x8SubSatU)                \
1407   V(I8x16AddSatS)                \
1408   V(I8x16SubSatS)                \
1409   V(I8x16AddSatU)                \
1410   V(I8x16SubSatU)
1411 
1412 #define PROTOTYPE_SIMD_ADD_SUB_SAT(name)                                     \
1413   void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1414             Simd128Register scratch1, Simd128Register scratch2);
1415   SIMD_ADD_SUB_SAT_LIST(PROTOTYPE_SIMD_ADD_SUB_SAT)
1416 #undef PROTOTYPE_SIMD_ADD_SUB_SAT
1417 #undef SIMD_ADD_SUB_SAT_LIST
1418 
1419 #define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
1420   V(I32x4ExtAddPairwiseI16x8S)        \
1421   V(I32x4ExtAddPairwiseI16x8U)        \
1422   V(I16x8ExtAddPairwiseI8x16S)        \
1423   V(I16x8ExtAddPairwiseI8x16U)
1424 
1425 #define PROTOTYPE_SIMD_EXT_ADD_PAIRWISE(name)         \
1426   void name(Simd128Register dst, Simd128Register src, \
1427             Simd128Register scratch1, Simd128Register scratch2);
1428   SIMD_EXT_ADD_PAIRWISE_LIST(PROTOTYPE_SIMD_EXT_ADD_PAIRWISE)
1429 #undef PROTOTYPE_SIMD_EXT_ADD_PAIRWISE
1430 #undef SIMD_EXT_ADD_PAIRWISE_LIST
1431 
1432   // ---------------------------------------------------------------------------
1433   // Pointer compression Support
1434 
1435   void SmiToPtrArrayOffset(Register dst, Register src) {
1436 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1437     STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
1438     ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
1439 #else
1440     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
1441     ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
1442 #endif
1443   }
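  // Worked example for the shift above (editor's note, assuming the usual V8
  // constants): with compressed pointers or 31-bit smis, kSmiShift == 1 and
  // kSystemPointerSizeLog2 == 3, so a smi n (encoded as n << 1) is shifted
  // left by 2 to produce the byte offset n * 8. Without compression on 64-bit,
  // kSmiShift == 32 and the encoded value (n << 32) is shifted right by 29,
  // yielding the same n * 8.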
1444 
1445   // Loads a field containing a HeapObject and decompresses it if pointer
1446   // compression is enabled.
1447   void LoadTaggedPointerField(const Register& destination,
1448                               const MemOperand& field_operand,
1449                               const Register& scratch = no_reg);
1450   void LoadTaggedSignedField(Register destination, MemOperand field_operand);
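  // Illustrative use (editor's sketch; the map field is only an example):
  //   LoadTaggedPointerField(map, FieldMemOperand(object, HeapObject::kMapOffset));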
1451 
1452   // Loads a field containing any tagged value and decompresses it if necessary.
1453   void LoadAnyTaggedField(const Register& destination,
1454                           const MemOperand& field_operand,
1455                           const Register& scratch = no_reg);
1456 
1457   // Loads a field containing a smi value and untags it.
1458   void SmiUntagField(Register dst, const MemOperand& src);
1459 
1460   // Compresses and stores a tagged value to the given on-heap location.
1461   void StoreTaggedField(const Register& value,
1462                         const MemOperand& dst_field_operand,
1463                         const Register& scratch = no_reg);
1464 
1465   void DecompressTaggedSigned(Register destination, MemOperand field_operand);
1466   void DecompressTaggedSigned(Register destination, Register src);
1467   void DecompressTaggedPointer(Register destination, MemOperand field_operand);
1468   void DecompressTaggedPointer(Register destination, Register source);
1469   void DecompressAnyTagged(Register destination, MemOperand field_operand);
1470   void DecompressAnyTagged(Register destination, Register source);
1471 
1472   // These count helpers clobber the scratch register pair (e.g. r0:r1).
1473   void CountLeadingZerosU32(Register dst, Register src,
1474                             Register scratch_pair = r0);
1475   void CountLeadingZerosU64(Register dst, Register src,
1476                             Register scratch_pair = r0);
1477   void CountTrailingZerosU32(Register dst, Register src,
1478                              Register scratch_pair = r0);
1479   void CountTrailingZerosU64(Register dst, Register src,
1480                              Register scratch_pair = r0);
1481 
1482  private:
1483   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1484 
1485   void CallCFunctionHelper(Register function, int num_reg_arguments,
1486                            int num_double_arguments);
1487 
1488   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1489   int CalculateStackPassedWords(int num_reg_arguments,
1490                                 int num_double_arguments);
1491 };
1492 
1493 // MacroAssembler implements a collection of frequently used macros.
1494 class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
1495  public:
1496   using TurboAssembler::TurboAssembler;
1497 
1498   void LoadStackLimit(Register destination, StackLimitKind kind);
1499   // It assumes that the arguments are located below the stack pointer.
1500   // argc is the number of arguments not including the receiver.
1501   // TODO(victorgomes): Remove this function once we stick with the reversed
1502   // arguments order.
1503   void LoadReceiver(Register dest, Register argc) {
1504     LoadU64(dest, MemOperand(sp, 0));
1505   }
1506 
1507   void StoreReceiver(Register rec, Register argc, Register scratch) {
1508     StoreU64(rec, MemOperand(sp, 0));
1509   }
1510 
1511   void CallRuntime(const Runtime::Function* f, int num_arguments,
1512                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
1513   void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
1514     const Runtime::Function* function = Runtime::FunctionForId(fid);
1515     CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
1516   }
1517 
1518   // Convenience function: Same as above, but takes the fid instead.
1519   void CallRuntime(Runtime::FunctionId fid,
1520                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1521     const Runtime::Function* function = Runtime::FunctionForId(fid);
1522     CallRuntime(function, function->nargs, save_doubles);
1523   }
1524 
1525   // Convenience function: Same as above, but takes the fid and argument count.
1526   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1527                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1528     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1529   }
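  // Illustrative use (editor's sketch; the runtime function is only an example):
  //   CallRuntime(Runtime::kStackGuard);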
1530 
1531   // Convenience function: tail call a runtime routine (jump).
1532   void TailCallRuntime(Runtime::FunctionId fid);
1533 
1534   // ---------------------------------------------------------------------------
1535   // Support functions.
1536 
1537   // Compare object type for heap object.  heap_object contains a non-Smi
1538   // whose object type should be compared with the given type.  This both
1539   // sets the flags and leaves the object type in the type_reg register.
1540   // It leaves the map in the map register (unless the type_reg and map register
1541   // are the same register).  It leaves the heap object in the heap_object
1542   // register unless the heap_object register is the same register as one of the
1543   // other registers.
1544   // Type_reg can be no_reg. In that case ip is used.
1545   void CompareObjectType(Register heap_object, Register map, Register type_reg,
1546                          InstanceType type);
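  // Illustrative use (editor's sketch; registers, label and instance type are
  // placeholders):
  //   CompareObjectType(obj, map, type_reg, JS_FUNCTION_TYPE);
  //   beq(&is_function);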
1547 
1548   // Compare instance type in a map.  map contains a valid map object whose
1549   // object type should be compared with the given type.  This both
1550   // sets the flags and leaves the object type in the type_reg register.
1551   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
1552 
1553   // Compare instance type ranges for a map (lower_limit and higher_limit
1554   // inclusive).
1555   //
1556   // Always use unsigned comparisons: ble for a positive (in-range) result.
1557   void CompareInstanceTypeRange(Register map, Register type_reg,
1558                                 InstanceType lower_limit,
1559                                 InstanceType higher_limit);
1560 
1561   // Compare the object in a register to a value from the root list.
1562   // Uses the ip register as scratch.
1563   void CompareRoot(Register obj, RootIndex index);
1564   void PushRoot(RootIndex index) {
1565     LoadRoot(r0, index);
1566     Push(r0);
1567   }
1568 
1569   template <class T>
1570   void CompareTagged(Register src1, T src2) {
1571     if (COMPRESS_POINTERS_BOOL) {
1572       CmpS32(src1, src2);
1573     } else {
1574       CmpS64(src1, src2);
1575     }
1576   }
1577 
1578   // Jump to a runtime routine.
1579   void JumpToExternalReference(const ExternalReference& builtin,
1580                                bool builtin_exit_frame = false);
1581 
1582   // Generates a trampoline to jump to the off-heap instruction stream.
1583   void JumpToOffHeapInstructionStream(Address entry);
1584 
1585   // Compare the object in a register to a value and jump if they are equal.
1586   void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
1587     CompareRoot(with, index);
1588     beq(if_equal);
1589   }
1590 
1591   // Compare the object in a register to a value and jump if they are not equal.
1592   void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
1593     CompareRoot(with, index);
1594     bne(if_not_equal);
1595   }
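  // Illustrative use (editor's sketch; register and label names are
  // placeholders):
  //   JumpIfRoot(obj, RootIndex::kUndefinedValue, &is_undefined);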
1596 
1597   // Checks if value is in range [lower_limit, higher_limit] using a single
1598   // comparison.
1599   void CompareRange(Register value, unsigned lower_limit,
1600                     unsigned higher_limit);
1601   void JumpIfIsInRange(Register value, unsigned lower_limit,
1602                        unsigned higher_limit, Label* on_in_range);
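  // Illustrative use (editor's sketch): branch when a register holds an ASCII
  // digit.
  //   JumpIfIsInRange(ch, '0', '9', &is_digit);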
1603 
1604   // ---------------------------------------------------------------------------
1605   // In-place weak references.
1606   void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1607 
1608   // ---------------------------------------------------------------------------
1609   // StatsCounter support
1610 
1611   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1612                         Register scratch2) {
1613     if (!FLAG_native_code_counters) return;
1614     EmitIncrementCounter(counter, value, scratch1, scratch2);
1615   }
1616   void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1617                             Register scratch2);
1618   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1619                         Register scratch2) {
1620     if (!FLAG_native_code_counters) return;
1621     EmitDecrementCounter(counter, value, scratch1, scratch2);
1622   }
1623   void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1624                             Register scratch2);
1625 
1626   // ---------------------------------------------------------------------------
1627   // Stack limit utilities
1628 
1629   MemOperand StackLimitAsMemOperand(StackLimitKind kind);
1630   void StackOverflowCheck(Register num_args, Register scratch,
1631                           Label* stack_overflow);
1632 
1633   // ---------------------------------------------------------------------------
1634   // JavaScript invokes
1635 
1636   // Set up call kind marking in ecx. The method takes ecx as an
1637   // explicit first parameter to make the code more readable at the
1638   // call sites.
1639   // void SetCallKind(Register dst, CallKind kind);
1640 
1641   // Removes current frame and its arguments from the stack preserving
1642   // the arguments and a return address pushed to the stack for the next call.
1643   // Both |callee_args_count| and |caller_args_count| do not include
1644   // receiver. |callee_args_count| is not modified. |caller_args_count|
1645   // is trashed.
1646 
1647   // Invoke the JavaScript function code by either calling or jumping.
1648   void InvokeFunctionCode(Register function, Register new_target,
1649                           Register expected_parameter_count,
1650                           Register actual_parameter_count, InvokeType type);
1651 
1652   // On function call, call into the debugger if necessary.
1653   void CheckDebugHook(Register fun, Register new_target,
1654                       Register expected_parameter_count,
1655                       Register actual_parameter_count);
1656 
1657   // Invoke the JavaScript function in the given register. Changes the
1658   // current context to the context in the function before invoking.
1659   void InvokeFunctionWithNewTarget(Register function, Register new_target,
1660                                    Register actual_parameter_count,
1661                                    InvokeType type);
1662   void InvokeFunction(Register function, Register expected_parameter_count,
1663                       Register actual_parameter_count, InvokeType type);
1664 
1665   // Exception handling
1666 
1667   // Push a new stack handler and link into stack handler chain.
1668   void PushStackHandler();
1669 
1670   // Unlink the stack handler on top of the stack from the stack handler chain.
1671   // Must preserve the result register.
1672   void PopStackHandler();
1673 
1674   // Enter exit frame.
1675   // stack_space - extra stack space, used for parameters before call to C.
1676   // At least one slot (for the return address) should be provided.
1677   void EnterExitFrame(bool save_doubles, int stack_space = 1,
1678                       StackFrame::Type frame_type = StackFrame::EXIT);
1679 
1680   // Leave the current exit frame. Expects the return value in r0.
1681   // Expects the number of values to remove (pushed prior to the exit
1682   // frame) in a register, or no_reg if there is nothing to remove.
1683   void LeaveExitFrame(bool save_doubles, Register argument_count,
1684                       bool argument_count_is_length = false);
1685 
1686   // Load the global proxy from the current context.
1687   void LoadGlobalProxy(Register dst) {
1688     LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1689   }
1690 
1691   void LoadNativeContextSlot(Register dst, int index);
1692 
1693   // ---------------------------------------------------------------------------
1694   // Smi utilities
1695 
1696   // Jump if the register contains a non-smi.
1697   inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1698     TestIfSmi(value);
1699     bne(not_smi_label /*, cr0*/);
1700   }
1701 
1702 #if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1703   // Ensure it is permissible to read/write int value directly from
1704   // upper half of the smi.
1705   STATIC_ASSERT(kSmiTag == 0);
1706   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1707 #endif
1708 #if V8_TARGET_LITTLE_ENDIAN
1709 #define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
1710 #else
1711 #define SmiWordOffset(offset) offset
1712 #endif
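  // SmiWordOffset adjusts a field offset so that a 32-bit access hits the
  // value-carrying half of an uncompressed smi field. Illustrative use with a
  // 32-bit load helper (editor's sketch; the field and helper are only
  // examples):
  //   LoadS32(dst, FieldMemOperand(array, SmiWordOffset(FixedArray::kLengthOffset)));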
1713 
1714   // Abort execution if argument is not a Constructor, enabled via --debug-code.
1715   void AssertConstructor(Register object, Register scratch);
1716 
1717   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1718   void AssertFunction(Register object);
1719 
1720   // Abort execution if argument is not a callable JSFunction, enabled via
1721   // --debug-code.
1722   void AssertCallableFunction(Register object);
1723 
1724   // Abort execution if argument is not a JSBoundFunction,
1725   // enabled via --debug-code.
1726   void AssertBoundFunction(Register object);
1727 
1728   // Abort execution if argument is not a JSGeneratorObject (or subclass),
1729   // enabled via --debug-code.
1730   void AssertGeneratorObject(Register object);
1731 
1732   // Abort execution if argument is not undefined or an AllocationSite, enabled
1733   // via --debug-code.
1734   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1735 
1736   template <typename Field>
1737   void DecodeField(Register dst, Register src) {
1738     ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
1739   }
1740 
1741   template <typename Field>
1742   void DecodeField(Register reg) {
1743     DecodeField<Field>(reg, reg);
1744   }
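  // Illustrative use (editor's sketch; FooBits is a hypothetical field):
  //   using FooBits = base::BitField<int, 3, 4>;
  //   DecodeField<FooBits>(dst, src);  // dst = (src >> 3) & 0xF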
1745 
1746   // ---------------------------------------------------------------------------
1747   // GC Support
1748 
1749   void IncrementalMarkingRecordWriteHelper(Register object, Register value,
1750                                            Register address);
1751 
1752   void CallJSEntry(Register target);
1753   static int CallSizeNotPredictableCodeSize(Address target,
1754                                             RelocInfo::Mode rmode,
1755                                             Condition cond = al);
1756   // Notify the garbage collector that we wrote a pointer into an object.
1757   // |object| is the object being stored into, |value| is the object being
1758   // stored.  The value and slot_address registers are clobbered by the operation.
1759   // The offset is the offset from the start of the object, not the offset from
1760   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
1761   void RecordWriteField(
1762       Register object, int offset, Register value, Register slot_address,
1763       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1764       RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1765       SmiCheck smi_check = SmiCheck::kInline);
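  // Illustrative write-barrier pattern (editor's sketch; register and offset
  // names are placeholders):
  //   StoreTaggedField(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, slot_address, kLRHasNotBeenSaved,
  //                    SaveFPRegsMode::kIgnore);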
1766 
1767   // For a given |object| notify the garbage collector that the slot
1768   // |slot_address| has been written.  |value| is the object being stored. The
1769   // value and slot_address registers are clobbered by the operation.
1770   void RecordWrite(
1771       Register object, Register slot_address, Register value,
1772       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1773       RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1774       SmiCheck smi_check = SmiCheck::kInline);
1775 
1776  private:
1777   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1778   // Helper functions for generating invokes.
1779   void InvokePrologue(Register expected_parameter_count,
1780                       Register actual_parameter_count, Label* done,
1781                       InvokeType type);
1782 
1783   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
1784 };
1785 
1786 #define ACCESS_MASM(masm) masm->
1787 
1788 }  // namespace internal
1789 }  // namespace v8
1790 
1791 #endif  // V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
1792