// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/optimized-compilation-info.h"
#include "src/s390/macro-assembler-s390.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

#define kScratchReg ip

// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  bool Is64BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord64;
  }

  bool Is32BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord32;
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MR:
        *first_index += 1;
        return MemOperand(InputRegister(index + 0), 0);
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
      case kMode_MRRI:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          InputInt32(index + 2));
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode = nullptr,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }

  MemOperand InputStackSlot(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand InputStackSlot32(size_t index) {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
    // We want to read the 32-bits directly from memory
    MemOperand mem = InputStackSlot(index);
    return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
#else
    return InputStackSlot(index);
#endif
  }
};

static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
}

static inline bool HasFPRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsFPRegister();
}

static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister() ||
         HasFPRegisterInput(instr, index);
}

static inline bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}

static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsFPStackSlot();
}

static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsStackSlot() ||
         HasFPStackSlotInput(instr, index);
}

namespace {

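// Out-of-line slow path that emits the record-write (write-barrier) call for
// stores whose value may need to be tracked by the GC.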
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  void SaveRegisters(RegList registers) {
    DCHECK_LT(0, NumRegs(registers));
    RegList regs = 0;
    for (int i = 0; i < Register::kNumRegisters; ++i) {
      if ((registers >> i) & 1u) {
        regs |= Register::from_code(i).bit();
      }
    }
    __ MultiPush(regs | r14.bit());
  }

  void RestoreRegisters(RegList registers) {
    DCHECK_LT(0, NumRegs(registers));
    RegList regs = 0;
    for (int i = 0; i < Register::kNumRegisters; ++i) {
      if ((registers >> i) & 1u) {
        regs |= Register::from_code(i).bit();
      }
    }
    __ MultiPop(regs | r14.bit());
  }

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    if (offset_ == no_reg) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
    }
    __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                           save_fp_mode);
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_ == no_reg.
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
  Zone* zone_;
};

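// Maps a TurboFan FlagsCondition to an S390 condition code. For the
// load-and-test opcodes the comparison is implicitly against zero, so the
// unsigned variants collapse to never/always/eq/ne.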
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kUnsignedLessThan:
      // unsigned number never less than 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_NOP;
    V8_FALLTHROUGH;
    case kSignedLessThan:
      return lt;
    case kUnsignedGreaterThanOrEqual:
      // unsigned number always greater than or equal 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_ALWAYS;
    V8_FALLTHROUGH;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kUnsignedLessThanOrEqual:
      // unsigned number never less than 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_EQ;
    V8_FALLTHROUGH;
    case kSignedLessThanOrEqual:
      return le;
    case kUnsignedGreaterThan:
      // unsigned number always greater than or equal 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return ne;
    V8_FALLTHROUGH;
    case kSignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for AddP/SubP only.
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return overflow;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return nooverflow;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
}

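// Helpers used by the *MInstr lambdas below: when the instruction encodes an
// addressing mode, decode the MemOperand from the inputs starting at |fi| and
// store the next unconsumed input index in |ret|; otherwise treat input |fi|
// as a stack slot (the 32-bit variant adjusts the slot offset on big-endian
// 64-bit targets).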
#define GET_MEMOPERAND32(ret, fi)                                       \
  ([&](int& ret) {                                                      \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0);                                                 \
    if (mode != kMode_None) {                                           \
      size_t first_index = (fi);                                        \
      mem = i.MemoryOperand(&mode, &first_index);                       \
      ret = first_index;                                                \
    } else {                                                            \
      mem = i.InputStackSlot32(fi);                                     \
    }                                                                   \
    return mem;                                                         \
  })(ret)

#define GET_MEMOPERAND(ret, fi)                                         \
  ([&](int& ret) {                                                      \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0);                                                 \
    if (mode != kMode_None) {                                           \
      size_t first_index = (fi);                                        \
      mem = i.MemoryOperand(&mode, &first_index);                       \
      ret = first_index;                                                \
    } else {                                                            \
      mem = i.InputStackSlot(fi);                                       \
    }                                                                   \
    return mem;                                                         \
  })(ret)

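// One lambda per instruction format: RR = register/register,
// RI = register/immediate, RM = register/memory (32- or 64-bit stack access),
// and the D* variants operate on double registers. Each lambda emits the
// operation and returns the index of the first instruction input it did not
// consume.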
#define RRInstr(instr)                                \
  [&]() {                                             \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputRegister(1)); \
    return 2;                                         \
  }
#define RIInstr(instr)                                 \
  [&]() {                                              \
    DCHECK(i.OutputRegister() == i.InputRegister(0));  \
    __ instr(i.OutputRegister(), i.InputImmediate(1)); \
    return 2;                                          \
  }
#define RMInstr(instr, GETMEM)                        \
  [&]() {                                             \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    int ret = 2;                                      \
    __ instr(i.OutputRegister(), GETMEM(ret, 1));     \
    return ret;                                       \
  }
#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)

#define RRRInstr(instr)                                                   \
  [&]() {                                                                 \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
    return 2;                                                             \
  }
#define RRIInstr(instr)                                                    \
  [&]() {                                                                  \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
    return 2;                                                              \
  }
#define RRMInstr(instr, GETMEM)                                       \
  [&]() {                                                             \
    int ret = 2;                                                      \
    __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
    return ret;                                                       \
  }
#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)

#define DDInstr(instr)                                            \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
    return 2;                                                     \
  }

#define DMInstr(instr)                                            \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2;                                                  \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1));   \
    return ret;                                                   \
  }

#define DMTInstr(instr)                                           \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2;                                                  \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1),    \
             kScratchDoubleReg);                                  \
    return ret;                                                   \
  }

#define R_MInstr(instr)                                   \
  [&]() {                                                 \
    int ret = 2;                                          \
    __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret;                                           \
  }

#define R_DInstr(instr)                                     \
  [&]() {                                                   \
    __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
    return 2;                                               \
  }

#define D_DInstr(instr)                                           \
  [&]() {                                                         \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    return 2;                                                     \
  }

#define D_MInstr(instr)                                         \
  [&]() {                                                       \
    int ret = 2;                                                \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret;                                                 \
  }

#define D_MTInstr(instr)                                       \
  [&]() {                                                      \
    int ret = 2;                                               \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
             kScratchDoubleReg);                               \
    return ret;                                                \
  }

static int nullInstr() {
  UNREACHABLE();
}

template <int numOfOperand, class RType, class MType, class IType>
static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
    return m();
  } else if (HasRegisterInput(instr, numOfOperand - 1)) {
    return r();
  } else if (HasImmediateInput(instr, numOfOperand - 1)) {
    return i();
  } else {
    UNREACHABLE();
  }
}

template <class _RR, class _RM, class _RI>
static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
  return AssembleOp<2>(instr, _rr, _rm, _ri);
}

template <class _R, class _M, class _I>
static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
  return AssembleOp<1>(instr, _r, _m, _i);
}

#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)

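// On 64-bit targets, 32-bit operations carry an extra immediate input that
// says whether the result must be zero-extended to 64 bits. The index of that
// flag is the value returned by the lambdas above (the first unconsumed
// input).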
#ifdef V8_TARGET_ARCH_S390X
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
  ([&](int index) {                                                   \
    DCHECK(HasImmediateInput(instr, (index)));                        \
    int doZeroExt = i.InputInt32(index);                              \
    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
  })(num)

#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
  { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
#else
#define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
#define CHECK_AND_ZERO_EXT_OUTPUT(num)
#endif

}  // namespace

#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
  do {                                                                \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1));                          \
  } while (0)

#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                         \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1));          \
      }                                                                 \
    }                                                                   \
  } while (0)

#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr)                       \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1));       \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1));        \
      }                                                                 \
    }                                                                   \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr)     \
  do {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode());    \
    if (mode != kMode_None) {                                              \
      size_t first_index = 1;                                              \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);           \
      __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                  \
    } else if (HasFPRegisterInput(instr, 1)) {                             \
      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                               \
      USE(HasFPStackSlotInput);                                            \
      DCHECK(HasFPStackSlotInput(instr, 1));                               \
      MemOperand operand = i.InputStackSlot(1);                            \
      if (operand.offset() >= 0) {                                         \
        __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                \
      } else {                                                             \
        __ load_instr(kScratchDoubleReg, operand);                         \
        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg);      \
      }                                                                    \
    }                                                                      \
  } while (0)

// The divide instruction dr implicitly uses the register pair r0 & r1:
// R0:R1 = R1 / divisor, with the remainder left in R0.
// Copy the remainder to the output register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do {                                          \
    __ LoadRR(r0, i.InputRegister(0));          \
    __ shift_instr(r0, Operand(32));            \
    __ div_instr(r0, i.InputRegister(1));       \
    __ LoadlW(i.OutputRegister(), r0);          \
  } while (0)

#define ASSEMBLE_FLOAT_MODULO()                                             \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2);    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

#define ASSEMBLE_DOUBLE_MAX()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ adbr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (right_reg != result_reg) {                                     \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (left_reg != result_reg) {                                      \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_DOUBLE_MIN()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcdbr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg == right_reg) {                                       \
      __ adbr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sdbr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcdbr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (right_reg != result_reg) {                                     \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (left_reg != result_reg) {                                      \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_FLOAT_MAX()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ aebr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (right_reg != result_reg) {                                     \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (left_reg != result_reg) {                                      \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_FLOAT_MIN()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcebr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg == right_reg) {                                       \
      __ aebr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sebr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcebr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (right_reg != result_reg) {                                     \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (left_reg != result_reg) {                                      \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
//
// Only MRI mode for these instructions available
#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    __ asm_instr(result, operand);                    \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
  do {                                           \
    Register result = i.OutputRegister();        \
    AddressingMode mode = kMode_None;            \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand);               \
  } while (0)

#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm)              \
  {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot(0));                        \
    }                                                                   \
  }

#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm)              \
  {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot32(0));                      \
    }                                                                   \
  }

#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand);                     \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand);                      \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    __ asm_instr(value, operand);                        \
  } while (0)

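// Byte/halfword compare-and-swap is built on the word-sized CS instruction:
// load the containing aligned word, splice the expected and new values into
// the right bit field with RotateInsertSelectBits, perform CmpAndSwap on the
// whole word, and finally extract the observed field into |output|.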
#define ATOMIC_COMP_EXCHANGE(start, end, shift_amount, offset)              \
  {                                                                         \
    __ LoadlW(temp0, MemOperand(addr, offset));                             \
    __ llgfr(temp1, temp0);                                                 \
    __ RotateInsertSelectBits(temp0, old_val, Operand(start),               \
             Operand(end), Operand(shift_amount), false);                   \
    __ RotateInsertSelectBits(temp1, new_val, Operand(start),               \
             Operand(end), Operand(shift_amount), false);                   \
    __ CmpAndSwap(temp0, temp1, MemOperand(addr, offset));                  \
    __ RotateInsertSelectBits(output, temp0, Operand(start+shift_amount),   \
             Operand(end+shift_amount), Operand(64-shift_amount), true);    \
  }

#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_COMP_EXCHANGE_BYTE(i)                                        \
  {                                                                         \
    constexpr int idx = (i);                                                \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!");            \
    constexpr int start = 32 + 8 * idx;                                     \
    constexpr int end = start + 7;                                          \
    constexpr int shift_amount = (3 - idx) * 8;                             \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx);                   \
  }
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i)                                    \
  {                                                                         \
    constexpr int idx = (i);                                                \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!");            \
    constexpr int start = 32 + 16 * idx;                                    \
    constexpr int end = start + 15;                                         \
    constexpr int shift_amount = (1 - idx) * 16;                            \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2);               \
  }
#else
#define ATOMIC_COMP_EXCHANGE_BYTE(i)                                        \
  {                                                                         \
    constexpr int idx = (i);                                                \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!");            \
    constexpr int start = 32 + 8 * (3 - idx);                               \
    constexpr int end = start + 7;                                          \
    constexpr int shift_amount = idx * 8;                                   \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx);                   \
  }
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i)                                    \
  {                                                                         \
    constexpr int idx = (i);                                                \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!");            \
    constexpr int start = 32 + 16 * (1 - idx);                              \
    constexpr int end = start + 15;                                         \
    constexpr int shift_amount = idx * 16;                                  \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2);               \
  }
#endif

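// The sub-word compare-exchange sequences compute the operand address with
// LAY, test its low two bits with TMLL to find the byte (or halfword) lane
// inside the aligned word, dispatch to the matching ATOMIC_COMP_EXCHANGE
// expansion, and then extend the result with |load_and_ext|.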
953 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext)            \
954   do {                                                                 \
955     Register old_val = i.InputRegister(0);                             \
956     Register new_val = i.InputRegister(1);                             \
957     Register output = i.OutputRegister();                              \
958     Register addr = kScratchReg;                                       \
959     Register temp0 = r0;                                               \
960     Register temp1 = r1;                                               \
961     size_t index = 2;                                                  \
962     AddressingMode mode = kMode_None;                                  \
963     MemOperand op = i.MemoryOperand(&mode, &index);                    \
964     Label three, two, one, done;                                       \
965     __ lay(addr, op);                                                  \
966     __ tmll(addr, Operand(3));                                         \
967     __ b(Condition(1), &three);                                        \
968     __ b(Condition(2), &two);                                          \
969     __ b(Condition(4), &one);                                          \
970     /* ending with 0b00 */                                             \
971     ATOMIC_COMP_EXCHANGE_BYTE(0);                                      \
972     __ b(&done);                                                       \
973     /* ending with 0b01 */                                             \
974     __ bind(&one);                                                     \
975     ATOMIC_COMP_EXCHANGE_BYTE(1);                                      \
976     __ b(&done);                                                       \
977     /* ending with 0b10 */                                             \
978     __ bind(&two);                                                     \
979     ATOMIC_COMP_EXCHANGE_BYTE(2);                                      \
980     __ b(&done);                                                       \
981     /* ending with 0b11 */                                             \
982     __ bind(&three);                                                   \
983     ATOMIC_COMP_EXCHANGE_BYTE(3);                                      \
984     __ bind(&done);                                                    \
985     __ load_and_ext(output, output);                                   \
986   } while (false)
987 
988 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext)          \
989   do {                                                                   \
990     Register old_val = i.InputRegister(0);                               \
991     Register new_val = i.InputRegister(1);                               \
992     Register output = i.OutputRegister();                                \
993     Register addr = kScratchReg;                                         \
994     Register temp0 = r0;                                                 \
995     Register temp1 = r1;                                                 \
996     size_t index = 2;                                                    \
997     AddressingMode mode = kMode_None;                                    \
998     MemOperand op = i.MemoryOperand(&mode, &index);                      \
999     Label two, done;                                                     \
1000     __ lay(addr, op);                                                    \
1001     __ tmll(addr, Operand(3));                                           \
1002     __ b(Condition(2), &two);                                            \
1003     ATOMIC_COMP_EXCHANGE_HALFWORD(0);                                    \
1004     __ b(&done);                                                         \
1005     __ bind(&two);                                                       \
1006     ATOMIC_COMP_EXCHANGE_HALFWORD(1);                                    \
1007     __ bind(&done);                                                      \
1008     __ load_and_ext(output, output);                                     \
1009   } while (false)
1010 
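// The word-sized case is naturally aligned, so a single CS (CmpAndSwap) on
// the computed address suffices: CS compares the first register operand with
// the memory word, stores |new_val| on a match, and otherwise loads the
// current memory contents into that register.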
1011 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD()                          \
1012   do {                                                                   \
1013     Register new_val = i.InputRegister(1);                               \
1014     Register output = i.OutputRegister();                                \
1015     Register addr = kScratchReg;                                         \
1016     size_t index = 2;                                                    \
1017     AddressingMode mode = kMode_None;                                    \
1018     MemOperand op = i.MemoryOperand(&mode, &index);                      \
1019     __ lay(addr, op);                                                    \
1020     __ CmpAndSwap(output, new_val, MemOperand(addr));                    \
1021   } while (false)
1022 
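// Word-sized read-modify-write atomics map onto a single interlocked
// load-and-update instruction supplied as |load_and_op|; the previous memory
// value is returned in |result| and then normalized to a 32-bit value by
// LoadlW.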
1023 #define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op)                           \
1024   do {                                                                    \
1025     Register value = i.InputRegister(2);                                  \
1026     Register result = i.OutputRegister(0);                                \
1027     Register addr = r1;                                                   \
1028     AddressingMode mode = kMode_None;                                     \
1029     MemOperand op = i.MemoryOperand(&mode);                               \
1030     __ lay(addr, op);                                                     \
1031     __ load_and_op(result, value, MemOperand(addr));                      \
1032     __ LoadlW(result, result);                                            \
1033   } while (false)
1034 
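// Sub-word atomics have no single-instruction form. ATOMIC_BIN_OP builds a
// compare-and-swap retry loop on the containing aligned word: the operand is
// rotated into the selected bit range with RotateInsertSelectBits, |bin_inst|
// is applied against the previously loaded word, the updated bit range is
// spliced back into a copy of that word, and CmpAndSwap is retried until it
// succeeds without interference from another writer.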
1035 #define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end)         \
1036   do {                                                                    \
1037     Label do_cs;                                                          \
1038     __ LoadlW(prev, MemOperand(addr, offset));                            \
1039     __ bind(&do_cs);                                                      \
1040     __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end),  \
1041              Operand(static_cast<intptr_t>(shift_amount)), true);         \
1042     __ bin_inst(new_val, prev, temp);                                     \
1043     __ lr(temp, prev);                                                    \
1044     __ RotateInsertSelectBits(temp, new_val, Operand(start),              \
1045              Operand(end), Operand::Zero(), false);                       \
1046     __ CmpAndSwap(prev, temp, MemOperand(addr, offset));                  \
1047     __ bne(&do_cs, Label::kNear);                                         \
1048   } while (false)
1049 
1050 #ifdef V8_TARGET_BIG_ENDIAN
1051 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result)  \
1052   {                                                              \
1053     constexpr int offset = -(2 * index);                         \
1054     constexpr int shift_amount = 16 - (index * 16);              \
1055     constexpr int start = 48 - shift_amount;                     \
1056     constexpr int end = start + 15;                              \
1057     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);   \
1058     extract_result();                                            \
1059   }
1060 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)      \
1061   {                                                              \
1062     constexpr int offset = -(index);                             \
1063     constexpr int shift_amount = 24 - (index * 8);               \
1064     constexpr int start = 56 - shift_amount;                     \
1065     constexpr int end = start + 7;                               \
1066     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);   \
1067     extract_result();                                            \
1068   }
1069 #else
1070 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result)  \
1071   {                                                              \
1072     constexpr int offset = -(2 * index);                         \
1073     constexpr int shift_amount = index * 16;                     \
1074     constexpr int start = 48 - shift_amount;                     \
1075     constexpr int end = start + 15;                              \
1076     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);   \
1077     extract_result();                                            \
1078   }
1079 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)      \
1080   {                                                              \
1081     constexpr int offset = -(index);                             \
1082     constexpr int shift_amount = index * 8;                      \
1083     constexpr int start = 56 - shift_amount;                     \
1084     constexpr int end = start + 7;                               \
1085     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);   \
1086     extract_result();                                            \
1087   }
1088 #endif  // V8_TARGET_BIG_ENDIAN
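// The big- and little-endian variants above differ only in the shift amount
// chosen for a given sub-word index; in both, |offset| backs the unaligned
// address up to the containing aligned word, and |start|/|end| select the
// 16-bit or 8-bit field that RotateInsertSelectBits operates on.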
1089 
1090 #define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result)          \
1091   do {                                                                    \
1092     Register value = i.InputRegister(2);                                  \
1093     Register result = i.OutputRegister(0);                                \
1094     Register prev = i.TempRegister(0);                                    \
1095     Register new_val = r0;                                                \
1096     Register addr = r1;                                                   \
1097     Register temp = kScratchReg;                                          \
1098     AddressingMode mode = kMode_None;                                     \
1099     MemOperand op = i.MemoryOperand(&mode);                               \
1100     Label two, done;                                                      \
1101     __ lay(addr, op);                                                     \
1102     __ tmll(addr, Operand(3));                                            \
1103     __ b(Condition(2), &two);                                             \
1104     /* word boundary */                                                   \
1105     ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result);                  \
1106     __ b(&done);                                                          \
1107     __ bind(&two);                                                        \
1108     /* halfword boundary */                                               \
1109     ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result);                  \
1110     __ bind(&done);                                                       \
1111   } while (false)
1112 
1113 #define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result)              \
1114   do {                                                                    \
1115     Register value = i.InputRegister(2);                                  \
1116     Register result = i.OutputRegister(0);                                \
1117     Register addr = i.TempRegister(0);                                    \
1118     Register prev = r0;                                                   \
1119     Register new_val = r1;                                                \
1120     Register temp = kScratchReg;                                          \
1121     AddressingMode mode = kMode_None;                                     \
1122     MemOperand op = i.MemoryOperand(&mode);                               \
1123     Label done, one, two, three;                                          \
1124     __ lay(addr, op);                                                     \
1125     __ tmll(addr, Operand(3));                                            \
1126     __ b(Condition(1), &three);                                           \
1127     __ b(Condition(2), &two);                                             \
1128     __ b(Condition(4), &one);                                             \
1129     /* ending with 0b00 (word boundary) */                                \
1130     ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result);                      \
1131     __ b(&done);                                                          \
1132     /* ending with 0b01 */                                                \
1133     __ bind(&one);                                                        \
1134     ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result);                      \
1135     __ b(&done);                                                          \
1136     /* ending with 0b10 (hw boundary) */                                  \
1137     __ bind(&two);                                                        \
1138     ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result);                      \
1139     __ b(&done);                                                          \
1140     /* ending with 0b11 */                                                \
1141     __ bind(&three);                                                      \
1142     ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result);                      \
1143     __ bind(&done);                                                       \
1144   } while (false)
1145 
1146 void CodeGenerator::AssembleDeconstructFrame() {
1147   __ LeaveFrame(StackFrame::MANUAL);
1148 }
1149 
1150 void CodeGenerator::AssemblePrepareTailCall() {
1151   if (frame_access_state()->has_frame()) {
1152     __ RestoreFrameStateForTailCall();
1153   }
1154   frame_access_state()->SetFrameAccessToSP();
1155 }
1156 
1157 void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
1158                                                      Register scratch1,
1159                                                      Register scratch2,
1160                                                      Register scratch3) {
1161   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
1162   Label done;
1163 
1164   // Check if current frame is an arguments adaptor frame.
1165   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
1166   __ CmpP(scratch1,
1167           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1168   __ bne(&done);
1169 
1170   // Load arguments count from current arguments adaptor frame (note, it
1171   // does not include receiver).
1172   Register caller_args_count_reg = scratch1;
1173   __ LoadP(caller_args_count_reg,
1174            MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1175   __ SmiUntag(caller_args_count_reg);
1176 
1177   ParameterCount callee_args_count(args_reg);
1178   __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
1179                         scratch3);
1180   __ bind(&done);
1181 }
1182 
1183 namespace {
1184 
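// Emits the (at most three) registers collected in |pending_pushes| as a
// single Push, bumps the frame's SP delta by the number of pushed slots, and
// clears the list.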
1185 void FlushPendingPushRegisters(TurboAssembler* tasm,
1186                                FrameAccessState* frame_access_state,
1187                                ZoneVector<Register>* pending_pushes) {
1188   switch (pending_pushes->size()) {
1189     case 0:
1190       break;
1191     case 1:
1192       tasm->Push((*pending_pushes)[0]);
1193       break;
1194     case 2:
1195       tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
1196       break;
1197     case 3:
1198       tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
1199                  (*pending_pushes)[2]);
1200       break;
1201     default:
1202       UNREACHABLE();
1203       break;
1204   }
1205   frame_access_state->IncreaseSPDelta(pending_pushes->size());
1206   pending_pushes->clear();
1207 }
1208 
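// Computes the difference between the requested and the current number of
// slots above sp and adjusts sp by that amount: a positive delta grows the
// stack (after flushing any pending pushes), while a negative delta shrinks
// it only when |allow_shrinkage| is set.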
1209 void AdjustStackPointerForTailCall(
1210     TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
1211     ZoneVector<Register>* pending_pushes = nullptr,
1212     bool allow_shrinkage = true) {
1213   int current_sp_offset = state->GetSPToFPSlotCount() +
1214                           StandardFrameConstants::kFixedSlotCountAboveFp;
1215   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1216   if (stack_slot_delta > 0) {
1217     if (pending_pushes != nullptr) {
1218       FlushPendingPushRegisters(tasm, state, pending_pushes);
1219     }
1220     tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1221     state->IncreaseSPDelta(stack_slot_delta);
1222   } else if (allow_shrinkage && stack_slot_delta < 0) {
1223     if (pending_pushes != nullptr) {
1224       FlushPendingPushRegisters(tasm, state, pending_pushes);
1225     }
1226     tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1227     state->IncreaseSPDelta(stack_slot_delta);
1228   }
1229 }
1230 
1231 void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
1232                                    S390OperandConverter& i) {
1233   const MemoryAccessMode access_mode =
1234       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
1235   if (access_mode == kMemoryAccessPoisoned) {
1236     Register value = i.OutputRegister();
1237     codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
1238   }
1239 }
1240 
1241 }  // namespace
1242 
1243 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
1244                                               int first_unused_stack_slot) {
1245   ZoneVector<MoveOperands*> pushes(zone());
1246   GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
1247 
1248   if (!pushes.empty() &&
1249       (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
1250        first_unused_stack_slot)) {
1251     S390OperandConverter g(this, instr);
1252     ZoneVector<Register> pending_pushes(zone());
1253     for (auto move : pushes) {
1254       LocationOperand destination_location(
1255           LocationOperand::cast(move->destination()));
1256       InstructionOperand source(move->source());
1257       AdjustStackPointerForTailCall(
1258           tasm(), frame_access_state(),
1259           destination_location.index() - pending_pushes.size(),
1260           &pending_pushes);
1261       // Pushes of non-register data types are not supported.
1262       DCHECK(source.IsRegister());
1263       LocationOperand source_location(LocationOperand::cast(source));
1264       pending_pushes.push_back(source_location.GetRegister());
1265       // TODO(arm): We can push more than 3 registers at once. Add support in
1266       // the macro-assembler for pushing a list of registers.
1267       if (pending_pushes.size() == 3) {
1268         FlushPendingPushRegisters(tasm(), frame_access_state(),
1269                                   &pending_pushes);
1270       }
1271       move->Eliminate();
1272     }
1273     FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
1274   }
1275   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1276                                 first_unused_stack_slot, nullptr, false);
1277 }
1278 
1279 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
1280                                              int first_unused_stack_slot) {
1281   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1282                                 first_unused_stack_slot);
1283 }
1284 
1285 // Check that {kJavaScriptCallCodeStartRegister} is correct.
1286 void CodeGenerator::AssembleCodeStartRegisterCheck() {
1287   Register scratch = r1;
1288   __ ComputeCodeStartAddress(scratch);
1289   __ CmpP(scratch, kJavaScriptCallCodeStartRegister);
1290   __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1291 }
1292 
1293 // Check if the code object is marked for deoptimization. If it is, then it
1294 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
1295 // to:
1296 //    1. read from memory the word that contains that bit, which can be found in
1297 //       the flags in the referenced {CodeDataContainer} object;
1298 //    2. test kMarkedForDeoptimizationBit in those flags; and
1299 //    3. if it is not zero then it jumps to the builtin.
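// In effect: if the kind-specific flags word of this Code's CodeDataContainer
// has Code::kMarkedForDeoptimizationBit set, control tail-calls the
// CompileLazyDeoptimizedCode builtin instead of running the stale code.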
1300 void CodeGenerator::BailoutIfDeoptimized() {
1301   if (FLAG_debug_code) {
1302     // Check that {kJavaScriptCallCodeStartRegister} is correct.
1303     __ ComputeCodeStartAddress(ip);
1304     __ CmpP(ip, kJavaScriptCallCodeStartRegister);
1305     __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1306   }
1307 
1308   int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
1309   __ LoadP(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
1310   __ LoadW(ip,
1311            FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
1312   __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
1313   // Ensure we're not serializing (otherwise we'd need to use an indirection to
1314   // access the builtin below).
1315   DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
1316   Handle<Code> code = isolate()->builtins()->builtin_handle(
1317       Builtins::kCompileLazyDeoptimizedCode);
1318   __ Jump(code, RelocInfo::CODE_TARGET, ne);
1319 }
1320 
1321 void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
1322   Register scratch = r1;
1323 
1324   Label current_pc;
1325   __ larl(scratch, &current_pc);
1326 
1327   __ bind(&current_pc);
1328   __ SubP(scratch, Operand(__ pc_offset()));
1329 
1330   // Calculate a mask which has all bits set in the normal case, but has all
1331   // bits cleared if we are speculatively executing the wrong PC.
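  // i.e. kSpeculationPoisonRegister =
  //          (kJavaScriptCallCodeStartRegister == scratch) ? -1 : 0;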
1332   __ LoadImmP(kSpeculationPoisonRegister, Operand::Zero());
1333   __ LoadImmP(r0, Operand(-1));
1334   __ CmpP(kJavaScriptCallCodeStartRegister, scratch);
1335   __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
1336 }
1337 
1338 void CodeGenerator::AssembleRegisterArgumentPoisoning() {
1339   __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
1340   __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
1341   __ AndP(sp, sp, kSpeculationPoisonRegister);
1342 }
1343 
1344 // Assembles an instruction after register allocation, producing machine code.
1345 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1346     Instruction* instr) {
1347   S390OperandConverter i(this, instr);
1348   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1349 
1350   switch (opcode) {
1351     case kArchComment:
1352 #ifdef V8_TARGET_ARCH_S390X
1353       __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
1354 #else
1355       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
1356 #endif
1357       break;
1358     case kArchCallCodeObject: {
1359       if (HasRegisterInput(instr, 0)) {
1360         Register reg = i.InputRegister(0);
1361         DCHECK_IMPLIES(
1362             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1363             reg == kJavaScriptCallCodeStartRegister);
1364         __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
1365         __ Call(reg);
1366       } else {
1367         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
1368       }
1369       RecordCallPosition(instr);
1370       frame_access_state()->ClearSPDelta();
1371       break;
1372     }
1373     case kArchCallWasmFunction: {
1374       // We must not share code targets for calls to builtins for wasm code, as
1375       // they might need to be patched individually.
1376       if (instr->InputAt(0)->IsImmediate()) {
1377         Constant constant = i.ToConstant(instr->InputAt(0));
1378 #ifdef V8_TARGET_ARCH_S390X
1379         Address wasm_code = static_cast<Address>(constant.ToInt64());
1380 #else
1381         Address wasm_code = static_cast<Address>(constant.ToInt32());
1382 #endif
1383         __ Call(wasm_code, constant.rmode());
1384       } else {
1385         __ Call(i.InputRegister(0));
1386       }
1387       RecordCallPosition(instr);
1388       frame_access_state()->ClearSPDelta();
1389       break;
1390     }
1391     case kArchTailCallCodeObjectFromJSFunction:
1392     case kArchTailCallCodeObject: {
1393       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
1394         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1395                                          i.TempRegister(0), i.TempRegister(1),
1396                                          i.TempRegister(2));
1397       }
1398       if (HasRegisterInput(instr, 0)) {
1399         Register reg = i.InputRegister(0);
1400         DCHECK_IMPLIES(
1401             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1402             reg == kJavaScriptCallCodeStartRegister);
1403         __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
1404         __ Jump(reg);
1405       } else {
1406         // We cannot use the constant pool to load the target since
1407         // we've already restored the caller's frame.
1408         ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
1409         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
1410       }
1411       frame_access_state()->ClearSPDelta();
1412       frame_access_state()->SetFrameAccessToDefault();
1413       break;
1414     }
1415     case kArchTailCallWasm: {
1416       // We must not share code targets for calls to builtins for wasm code, as
1417       // they might need to be patched individually.
1418       if (instr->InputAt(0)->IsImmediate()) {
1419         Constant constant = i.ToConstant(instr->InputAt(0));
1420 #ifdef V8_TARGET_ARCH_S390X
1421         Address wasm_code = static_cast<Address>(constant.ToInt64());
1422 #else
1423         Address wasm_code = static_cast<Address>(constant.ToInt32());
1424 #endif
1425         __ Jump(wasm_code, constant.rmode());
1426       } else {
1427         __ Jump(i.InputRegister(0));
1428       }
1429       frame_access_state()->ClearSPDelta();
1430       frame_access_state()->SetFrameAccessToDefault();
1431       break;
1432     }
1433     case kArchTailCallAddress: {
1434       CHECK(!instr->InputAt(0)->IsImmediate());
1435       Register reg = i.InputRegister(0);
1436       DCHECK_IMPLIES(
1437           HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1438           reg == kJavaScriptCallCodeStartRegister);
1439       __ Jump(reg);
1440       frame_access_state()->ClearSPDelta();
1441       frame_access_state()->SetFrameAccessToDefault();
1442       break;
1443     }
1444     case kArchCallJSFunction: {
1445       Register func = i.InputRegister(0);
1446       if (FLAG_debug_code) {
1447         // Check the function's context matches the context argument.
1448         __ LoadP(kScratchReg,
1449                  FieldMemOperand(func, JSFunction::kContextOffset));
1450         __ CmpP(cp, kScratchReg);
1451         __ Assert(eq, AbortReason::kWrongFunctionContext);
1452       }
1453       static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1454       __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
1455       __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1456       __ Call(r4);
1457       RecordCallPosition(instr);
1458       frame_access_state()->ClearSPDelta();
1459       break;
1460     }
1461     case kArchPrepareCallCFunction: {
1462       int const num_parameters = MiscField::decode(instr->opcode());
1463       __ PrepareCallCFunction(num_parameters, kScratchReg);
1464       // Frame alignment requires using FP-relative frame addressing.
1465       frame_access_state()->SetFrameAccessToFP();
1466       break;
1467     }
1468     case kArchSaveCallerRegisters: {
1469       fp_mode_ =
1470           static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1471       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1472       // kReturnRegister0 should have been saved before entering the stub.
1473       int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
1474       DCHECK_EQ(0, bytes % kPointerSize);
1475       DCHECK_EQ(0, frame_access_state()->sp_delta());
1476       frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
1477       DCHECK(!caller_registers_saved_);
1478       caller_registers_saved_ = true;
1479       break;
1480     }
1481     case kArchRestoreCallerRegisters: {
1482       DCHECK(fp_mode_ ==
1483              static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1484       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1485       // Don't overwrite the returned value.
1486       int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
1487       frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
1488       DCHECK_EQ(0, frame_access_state()->sp_delta());
1489       DCHECK(caller_registers_saved_);
1490       caller_registers_saved_ = false;
1491       break;
1492     }
1493     case kArchPrepareTailCall:
1494       AssemblePrepareTailCall();
1495       break;
1496     case kArchCallCFunction: {
1497       int const num_parameters = MiscField::decode(instr->opcode());
1498       if (instr->InputAt(0)->IsImmediate()) {
1499         ExternalReference ref = i.InputExternalReference(0);
1500         __ CallCFunction(ref, num_parameters);
1501       } else {
1502         Register func = i.InputRegister(0);
1503         __ CallCFunction(func, num_parameters);
1504       }
1505       frame_access_state()->SetFrameAccessToDefault();
1506       // Ideally, we should decrement SP delta to match the change of stack
1507       // pointer in CallCFunction. However, for certain architectures (e.g.
1508       // ARM), there may be more strict alignment requirement, causing old SP
1509       // to be saved on the stack. In those cases, we can not calculate the SP
1510       // delta statically.
1511       frame_access_state()->ClearSPDelta();
1512       if (caller_registers_saved_) {
1513         // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1514         // Here, we assume the sequence to be:
1515         //   kArchSaveCallerRegisters;
1516         //   kArchCallCFunction;
1517         //   kArchRestoreCallerRegisters;
1518         int bytes =
1519             __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1520         frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
1521       }
1522       break;
1523     }
1524     case kArchJmp:
1525       AssembleArchJump(i.InputRpo(0));
1526       break;
1527     case kArchBinarySearchSwitch:
1528       AssembleArchBinarySearchSwitch(instr);
1529       break;
1530     case kArchLookupSwitch:
1531       AssembleArchLookupSwitch(instr);
1532       break;
1533     case kArchTableSwitch:
1534       AssembleArchTableSwitch(instr);
1535       break;
1536     case kArchDebugAbort:
1537       DCHECK(i.InputRegister(0) == r3);
1538       if (!frame_access_state()->has_frame()) {
1539         // We don't actually want to generate a pile of code for this, so just
1540         // claim there is a stack frame, without generating one.
1541         FrameScope scope(tasm(), StackFrame::NONE);
1542         __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
1543                 RelocInfo::CODE_TARGET);
1544       } else {
1545         __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
1546                 RelocInfo::CODE_TARGET);
1547       }
1548       __ stop("kArchDebugAbort");
1549       break;
1550     case kArchDebugBreak:
1551       __ stop("kArchDebugBreak");
1552       break;
1553     case kArchNop:
1554     case kArchThrowTerminator:
1555       // don't emit code for nops.
1556       break;
1557     case kArchDeoptimize: {
1558       int deopt_state_id =
1559           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
1560       CodeGenResult result =
1561           AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
1562       if (result != kSuccess) return result;
1563       break;
1564     }
1565     case kArchRet:
1566       AssembleReturn(instr->InputAt(0));
1567       break;
1568     case kArchStackPointer:
1569       __ LoadRR(i.OutputRegister(), sp);
1570       break;
1571     case kArchFramePointer:
1572       __ LoadRR(i.OutputRegister(), fp);
1573       break;
1574     case kArchParentFramePointer:
1575       if (frame_access_state()->has_frame()) {
1576         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
1577       } else {
1578         __ LoadRR(i.OutputRegister(), fp);
1579       }
1580       break;
1581     case kArchTruncateDoubleToI:
1582       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1583                            i.InputDoubleRegister(0), DetermineStubCallMode());
1584       break;
1585     case kArchStoreWithWriteBarrier: {
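      // Store |value| into the field of |object|, then branch to the
      // out-of-line RecordWrite stub only if the page containing |object|
      // has the "pointers from here are interesting" flag set.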
1586       RecordWriteMode mode =
1587           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1588       Register object = i.InputRegister(0);
1589       Register value = i.InputRegister(2);
1590       Register scratch0 = i.TempRegister(0);
1591       Register scratch1 = i.TempRegister(1);
1592       OutOfLineRecordWrite* ool;
1593 
1594       AddressingMode addressing_mode =
1595           AddressingModeField::decode(instr->opcode());
1596       if (addressing_mode == kMode_MRI) {
1597         int32_t offset = i.InputInt32(1);
1598         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
1599                                                 scratch0, scratch1, mode);
1600         __ StoreP(value, MemOperand(object, offset));
1601       } else {
1602         DCHECK_EQ(kMode_MRR, addressing_mode);
1603         Register offset(i.InputRegister(1));
1604         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
1605                                                 scratch0, scratch1, mode);
1606         __ StoreP(value, MemOperand(object, offset));
1607       }
1608       __ CheckPageFlag(object, scratch0,
1609                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1610                        ool->entry());
1611       __ bind(ool->exit());
1612       break;
1613     }
1614     case kArchStackSlot: {
1615       FrameOffset offset =
1616           frame_access_state()->GetFrameOffset(i.InputInt32(0));
1617       __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1618               Operand(offset.offset()));
1619       break;
1620     }
1621     case kArchWordPoisonOnSpeculation:
1622       DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
1623       __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
1624       break;
1625     case kS390_Abs32:
1626       // TODO(john.yan): zero-ext
1627       __ lpr(i.OutputRegister(0), i.InputRegister(0));
1628       break;
1629     case kS390_Abs64:
1630       __ lpgr(i.OutputRegister(0), i.InputRegister(0));
1631       break;
1632     case kS390_And32:
1633       // zero-ext
1634       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1635         ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
1636       } else {
1637         ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
1638       }
1639       break;
1640     case kS390_And64:
1641       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1642         ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
1643       } else {
1644         ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
1645       }
1646       break;
1647     case kS390_Or32:
1648       // zero-ext
1649       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1650         ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
1651       } else {
1652         ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
1653       }
1654       break;
1655     case kS390_Or64:
1656       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1657         ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
1658       } else {
1659         ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
1660       }
1661       break;
1662     case kS390_Xor32:
1663       // zero-ext
1664       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1665         ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
1666       } else {
1667         ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
1668       }
1669       break;
1670     case kS390_Xor64:
1671       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1672         ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
1673       } else {
1674         ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
1675       }
1676       break;
1677     case kS390_ShiftLeft32:
1678       // zero-ext
1679       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1680         ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
1681       } else {
1682         ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
1683       }
1684       break;
1685     case kS390_ShiftLeft64:
1686       ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
1687       break;
1688     case kS390_ShiftRight32:
1689       // zero-ext
1690       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1691         ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
1692       } else {
1693         ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
1694       }
1695       break;
1696     case kS390_ShiftRight64:
1697       ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
1698       break;
1699     case kS390_ShiftRightArith32:
1700       // zero-ext
1701       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1702         ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
1703       } else {
1704         ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
1705       }
1706       break;
1707     case kS390_ShiftRightArith64:
1708       ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
1709       break;
1710 #if !V8_TARGET_ARCH_S390X
1711     case kS390_AddPair:
1712       // i.InputRegister(0) ... left low word.
1713       // i.InputRegister(1) ... left high word.
1714       // i.InputRegister(2) ... right low word.
1715       // i.InputRegister(3) ... right high word.
1716       __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
1717                       i.InputRegister(2));
1718       __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
1719                                i.InputRegister(3));
1720       break;
1721     case kS390_SubPair:
1722       // i.InputRegister(0) ... left low word.
1723       // i.InputRegister(1) ... left high word.
1724       // i.InputRegister(2) ... right low word.
1725       // i.InputRegister(3) ... right high word.
1726       __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
1727                       i.InputRegister(2));
1728       __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
1729                                 i.InputRegister(3));
1730       break;
1731     case kS390_MulPair:
1732       // i.InputRegister(0) ... left low word.
1733       // i.InputRegister(1) ... left high word.
1734       // i.InputRegister(2) ... right low word.
1735       // i.InputRegister(3) ... right high word.
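      // Pack each pair into one 64-bit register as (high << 32) | low,
      // multiply the packed operands with msgr, then split the 64-bit
      // product back into the low and high output words.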
1736       __ sllg(r0, i.InputRegister(1), Operand(32));
1737       __ sllg(r1, i.InputRegister(3), Operand(32));
1738       __ lr(r0, i.InputRegister(0));
1739       __ lr(r1, i.InputRegister(2));
1740       __ msgr(r1, r0);
1741       __ lr(i.OutputRegister(0), r1);
1742       __ srag(i.OutputRegister(1), r1, Operand(32));
1743       break;
1744     case kS390_ShiftLeftPair: {
1745       Register second_output =
1746           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1747       if (instr->InputAt(2)->IsImmediate()) {
1748         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1749                          i.InputRegister(1), i.InputInt32(2));
1750       } else {
1751         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1752                          i.InputRegister(1), kScratchReg, i.InputRegister(2));
1753       }
1754       break;
1755     }
1756     case kS390_ShiftRightPair: {
1757       Register second_output =
1758           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1759       if (instr->InputAt(2)->IsImmediate()) {
1760         __ ShiftRightPair(i.OutputRegister(0), second_output,
1761                           i.InputRegister(0), i.InputRegister(1),
1762                           i.InputInt32(2));
1763       } else {
1764         __ ShiftRightPair(i.OutputRegister(0), second_output,
1765                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
1766                           i.InputRegister(2));
1767       }
1768       break;
1769     }
1770     case kS390_ShiftRightArithPair: {
1771       Register second_output =
1772           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1773       if (instr->InputAt(2)->IsImmediate()) {
1774         __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1775                                i.InputRegister(0), i.InputRegister(1),
1776                                i.InputInt32(2));
1777       } else {
1778         __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1779                                i.InputRegister(0), i.InputRegister(1),
1780                                kScratchReg, i.InputRegister(2));
1781       }
1782       break;
1783     }
1784 #endif
1785     case kS390_RotRight32: {
1786       // zero-ext
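      // There is no rotate-right instruction; rotate left by the negated
      // register amount or by (32 - immediate) instead.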
1787       if (HasRegisterInput(instr, 1)) {
1788         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
1789         __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1790       } else {
1791         __ rll(i.OutputRegister(), i.InputRegister(0),
1792                Operand(32 - i.InputInt32(1)));
1793       }
1794       CHECK_AND_ZERO_EXT_OUTPUT(2);
1795       break;
1796     }
1797     case kS390_RotRight64:
1798       if (HasRegisterInput(instr, 1)) {
1799         __ lcgr(kScratchReg, i.InputRegister(1));
1800         __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1801       } else {
1802         DCHECK(HasImmediateInput(instr, 1));
1803         __ rllg(i.OutputRegister(), i.InputRegister(0),
1804                 Operand(64 - i.InputInt32(1)));
1805       }
1806       break;
1807     // TODO(john.yan): clean up kS390_RotLeftAnd...
1808     case kS390_RotLeftAndClear64:
1809       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1810         int shiftAmount = i.InputInt32(1);
1811         int endBit = 63 - shiftAmount;
1812         int startBit = 63 - i.InputInt32(2);
1813         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1814                Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
1815       } else {
1816         int shiftAmount = i.InputInt32(1);
1817         int clearBit = 63 - i.InputInt32(2);
1818         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1819         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1820         __ srlg(i.OutputRegister(), i.OutputRegister(),
1821                 Operand(clearBit + shiftAmount));
1822         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
1823       }
1824       break;
1825     case kS390_RotLeftAndClearLeft64:
1826       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1827         int shiftAmount = i.InputInt32(1);
1828         int endBit = 63;
1829         int startBit = 63 - i.InputInt32(2);
1830         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1831                 Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
1832       } else {
1833         int shiftAmount = i.InputInt32(1);
1834         int clearBit = 63 - i.InputInt32(2);
1835         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1836         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1837         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1838       }
1839       break;
1840     case kS390_RotLeftAndClearRight64:
1841       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1842         int shiftAmount = i.InputInt32(1);
1843         int endBit = 63 - i.InputInt32(2);
1844         int startBit = 0;
1845         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1846                 Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
1847       } else {
1848         int shiftAmount = i.InputInt32(1);
1849         int clearBit = i.InputInt32(2);
1850         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1851         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1852         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1853       }
1854       break;
1855     case kS390_Add32: {
1856       // zero-ext
1857       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1858         ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
1859       } else {
1860         ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
1861       }
1862       break;
1863     }
1864     case kS390_Add64:
1865       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1866         ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
1867       } else {
1868         ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
1869       }
1870       break;
1871     case kS390_AddFloat:
1872       ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
1873       break;
1874     case kS390_AddDouble:
1875       ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
1876       break;
1877     case kS390_Sub32:
1878       // zero-ext
1879       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1880         ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
1881       } else {
1882         ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
1883       }
1884       break;
1885     case kS390_Sub64:
1886       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1887         ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
1888       } else {
1889         ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
1890       }
1891       break;
1892     case kS390_SubFloat:
1893       ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
1894       break;
1895     case kS390_SubDouble:
1896       ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
1897       break;
1898     case kS390_Mul32:
1899       // zero-ext
1900       if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1901         ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(Mul32));
1902       } else {
1903         ASSEMBLE_BIN32_OP(RRInstr(Mul32), RM32Instr(Mul32), RIInstr(Mul32));
1904       }
1905       break;
1906     case kS390_Mul32WithOverflow:
1907       // zero-ext
1908       ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
1909                         RRM32Instr(Mul32WithOverflowIfCCUnequal),
1910                         RRIInstr(Mul32WithOverflowIfCCUnequal));
1911       break;
1912     case kS390_Mul64:
1913       ASSEMBLE_BIN_OP(RRInstr(Mul64), RM64Instr(Mul64), RIInstr(Mul64));
1914       break;
1915     case kS390_MulHigh32:
1916       // zero-ext
1917       ASSEMBLE_BIN_OP(RRRInstr(MulHigh32), RRM32Instr(MulHigh32),
1918                       RRIInstr(MulHigh32));
1919       break;
1920     case kS390_MulHighU32:
1921       // zero-ext
1922       ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
1923                       RRIInstr(MulHighU32));
1924       break;
1925     case kS390_MulFloat:
1926       ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
1927       break;
1928     case kS390_MulDouble:
1929       ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
1930       break;
1931     case kS390_Div64:
1932       ASSEMBLE_BIN_OP(RRRInstr(Div64), RRM64Instr(Div64), nullInstr);
1933       break;
1934     case kS390_Div32: {
1935       // zero-ext
1936       ASSEMBLE_BIN_OP(RRRInstr(Div32), RRM32Instr(Div32), nullInstr);
1937       break;
1938     }
1939     case kS390_DivU64:
1940       ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
1941       break;
1942     case kS390_DivU32: {
1943       // zero-ext
1944       ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
1945       break;
1946     }
1947     case kS390_DivFloat:
1948       ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
1949       break;
1950     case kS390_DivDouble:
1951       ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
1952       break;
1953     case kS390_Mod32:
1954       // zero-ext
1955       ASSEMBLE_BIN_OP(RRRInstr(Mod32), RRM32Instr(Mod32), nullInstr);
1956       break;
1957     case kS390_ModU32:
1958       // zero-ext
1959       ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
1960       break;
1961     case kS390_Mod64:
1962       ASSEMBLE_BIN_OP(RRRInstr(Mod64), RRM64Instr(Mod64), nullInstr);
1963       break;
1964     case kS390_ModU64:
1965       ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
1966       break;
1967     case kS390_AbsFloat:
1968       __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1969       break;
1970     case kS390_SqrtFloat:
1971       ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
1972       break;
1973     case kS390_SqrtDouble:
1974       ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
1975       break;
1976     case kS390_FloorFloat:
1977       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
1978                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1979       break;
1980     case kS390_CeilFloat:
1981       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
1982                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1983       break;
1984     case kS390_TruncateFloat:
1985       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
1986                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1987       break;
1988     //  Double operations
1989     case kS390_ModDouble:
1990       ASSEMBLE_FLOAT_MODULO();
1991       break;
1992     case kIeee754Float64Acos:
1993       ASSEMBLE_IEEE754_UNOP(acos);
1994       break;
1995     case kIeee754Float64Acosh:
1996       ASSEMBLE_IEEE754_UNOP(acosh);
1997       break;
1998     case kIeee754Float64Asin:
1999       ASSEMBLE_IEEE754_UNOP(asin);
2000       break;
2001     case kIeee754Float64Asinh:
2002       ASSEMBLE_IEEE754_UNOP(asinh);
2003       break;
2004     case kIeee754Float64Atanh:
2005       ASSEMBLE_IEEE754_UNOP(atanh);
2006       break;
2007     case kIeee754Float64Atan:
2008       ASSEMBLE_IEEE754_UNOP(atan);
2009       break;
2010     case kIeee754Float64Atan2:
2011       ASSEMBLE_IEEE754_BINOP(atan2);
2012       break;
2013     case kIeee754Float64Tan:
2014       ASSEMBLE_IEEE754_UNOP(tan);
2015       break;
2016     case kIeee754Float64Tanh:
2017       ASSEMBLE_IEEE754_UNOP(tanh);
2018       break;
2019     case kIeee754Float64Cbrt:
2020       ASSEMBLE_IEEE754_UNOP(cbrt);
2021       break;
2022     case kIeee754Float64Sin:
2023       ASSEMBLE_IEEE754_UNOP(sin);
2024       break;
2025     case kIeee754Float64Sinh:
2026       ASSEMBLE_IEEE754_UNOP(sinh);
2027       break;
2028     case kIeee754Float64Cos:
2029       ASSEMBLE_IEEE754_UNOP(cos);
2030       break;
2031     case kIeee754Float64Cosh:
2032       ASSEMBLE_IEEE754_UNOP(cosh);
2033       break;
2034     case kIeee754Float64Exp:
2035       ASSEMBLE_IEEE754_UNOP(exp);
2036       break;
2037     case kIeee754Float64Expm1:
2038       ASSEMBLE_IEEE754_UNOP(expm1);
2039       break;
2040     case kIeee754Float64Log:
2041       ASSEMBLE_IEEE754_UNOP(log);
2042       break;
2043     case kIeee754Float64Log1p:
2044       ASSEMBLE_IEEE754_UNOP(log1p);
2045       break;
2046     case kIeee754Float64Log2:
2047       ASSEMBLE_IEEE754_UNOP(log2);
2048       break;
2049     case kIeee754Float64Log10:
2050       ASSEMBLE_IEEE754_UNOP(log10);
2051       break;
2052     case kIeee754Float64Pow: {
2053       __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
2054       __ Move(d1, d3);
2055       break;
2056     }
2057     case kS390_Neg32:
2058       __ lcr(i.OutputRegister(), i.InputRegister(0));
2059       CHECK_AND_ZERO_EXT_OUTPUT(1);
2060       break;
2061     case kS390_Neg64:
2062       __ lcgr(i.OutputRegister(), i.InputRegister(0));
2063       break;
2064     case kS390_MaxFloat:
2065       ASSEMBLE_FLOAT_MAX();
2066       break;
2067     case kS390_MaxDouble:
2068       ASSEMBLE_DOUBLE_MAX();
2069       break;
2070     case kS390_MinFloat:
2071       ASSEMBLE_FLOAT_MIN();
2072       break;
2073     case kS390_MinDouble:
2074       ASSEMBLE_DOUBLE_MIN();
2075       break;
2076     case kS390_AbsDouble:
2077       __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2078       break;
2079     case kS390_FloorDouble:
2080       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
2081                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2082       break;
2083     case kS390_CeilDouble:
2084       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
2085                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2086       break;
2087     case kS390_TruncateDouble:
2088       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
2089                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2090       break;
2091     case kS390_RoundDouble:
2092       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0,
2093                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2094       break;
2095     case kS390_NegFloat:
2096       ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
2097       break;
2098     case kS390_NegDouble:
2099       ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
2100       break;
2101     case kS390_Cntlz32: {
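      // flogr yields the 64-bit leading-zero count, so zero-extend the input
      // first and subtract 32 to get the 32-bit count.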
2102       __ llgfr(i.OutputRegister(), i.InputRegister(0));
2103       __ flogr(r0, i.OutputRegister());
2104       __ Add32(i.OutputRegister(), r0, Operand(-32));
2105       // No need to zero-ext b/c llgfr is done already
2106       break;
2107     }
2108 #if V8_TARGET_ARCH_S390X
2109     case kS390_Cntlz64: {
2110       __ flogr(r0, i.InputRegister(0));
2111       __ LoadRR(i.OutputRegister(), r0);
2112       break;
2113     }
2114 #endif
2115     case kS390_Popcnt32:
2116       __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
2117       break;
2118 #if V8_TARGET_ARCH_S390X
2119     case kS390_Popcnt64:
2120       __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
2121       break;
2122 #endif
2123     case kS390_Cmp32:
2124       ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
2125       break;
2126 #if V8_TARGET_ARCH_S390X
2127     case kS390_Cmp64:
2128       ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
2129       break;
2130 #endif
2131     case kS390_CmpFloat:
2132       ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
2133       // __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
2134       break;
2135     case kS390_CmpDouble:
2136       ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
2137       // __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
2138       break;
2139     case kS390_Tst32:
2140       if (HasRegisterInput(instr, 1)) {
2141         __ And(r0, i.InputRegister(0), i.InputRegister(1));
2142       } else {
2143         // detect tmlh/tmhl/tmhh case
2144         Operand opnd = i.InputImmediate(1);
2145         if (is_uint16(opnd.immediate())) {
2146           __ tmll(i.InputRegister(0), opnd);
2147         } else {
2148           __ lr(r0, i.InputRegister(0));
2149           __ nilf(r0, opnd);
2150         }
2151       }
2152       break;
2153     case kS390_Tst64:
2154       if (HasRegisterInput(instr, 1)) {
2155         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
2156       } else {
2157         Operand opnd = i.InputImmediate(1);
2158         if (is_uint16(opnd.immediate())) {
2159           __ tmll(i.InputRegister(0), opnd);
2160         } else {
2161           __ AndP(r0, i.InputRegister(0), opnd);
2162         }
2163       }
2164       break;
2165     case kS390_Float64SilenceNaN: {
2166       DoubleRegister value = i.InputDoubleRegister(0);
2167       DoubleRegister result = i.OutputDoubleRegister();
2168       __ CanonicalizeNaN(result, value);
2169       break;
2170     }
2171     case kS390_StackClaim: {
2172       int num_slots = i.InputInt32(0);
2173       __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
2174       frame_access_state()->IncreaseSPDelta(num_slots);
2175       break;
2176     }
2177     case kS390_Push:
2178       if (instr->InputAt(0)->IsFPRegister()) {
2179         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2180         if (op->representation() == MachineRepresentation::kFloat64) {
2181           __ lay(sp, MemOperand(sp, -kDoubleSize));
2182           __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2183           frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
2184         } else {
2185           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2186           __ lay(sp, MemOperand(sp, -kPointerSize));
2187           __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
2188           frame_access_state()->IncreaseSPDelta(1);
2189         }
2190       } else {
2191         __ Push(i.InputRegister(0));
2192         frame_access_state()->IncreaseSPDelta(1);
2193       }
2194       break;
2195     case kS390_PushFrame: {
2196       int num_slots = i.InputInt32(1);
2197       __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
2198       if (instr->InputAt(0)->IsFPRegister()) {
2199         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2200         if (op->representation() == MachineRepresentation::kFloat64) {
2201           __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2202         } else {
2203           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2204           __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
2205         }
2206       } else {
2207         __ StoreP(i.InputRegister(0),
2208                   MemOperand(sp));
2209       }
2210       break;
2211     }
2212     case kS390_StoreToStackSlot: {
2213       int slot = i.InputInt32(1);
2214       if (instr->InputAt(0)->IsFPRegister()) {
2215         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2216         if (op->representation() == MachineRepresentation::kFloat64) {
2217           __ StoreDouble(i.InputDoubleRegister(0),
2218                          MemOperand(sp, slot * kPointerSize));
2219         } else {
2220           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2221           __ StoreFloat32(i.InputDoubleRegister(0),
2222                           MemOperand(sp, slot * kPointerSize));
2223         }
2224       } else {
2225         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
2226       }
2227       break;
2228     }
2229     case kS390_SignExtendWord8ToInt32:
2230       __ lbr(i.OutputRegister(), i.InputRegister(0));
2231       CHECK_AND_ZERO_EXT_OUTPUT(1);
2232       break;
2233     case kS390_SignExtendWord16ToInt32:
2234       __ lhr(i.OutputRegister(), i.InputRegister(0));
2235       CHECK_AND_ZERO_EXT_OUTPUT(1);
2236       break;
2237     case kS390_SignExtendWord8ToInt64:
2238       __ lgbr(i.OutputRegister(), i.InputRegister(0));
2239       break;
2240     case kS390_SignExtendWord16ToInt64:
2241       __ lghr(i.OutputRegister(), i.InputRegister(0));
2242       break;
2243     case kS390_SignExtendWord32ToInt64:
2244       __ lgfr(i.OutputRegister(), i.InputRegister(0));
2245       break;
2246     case kS390_Uint32ToUint64:
2247       // Zero extend
2248       __ llgfr(i.OutputRegister(), i.InputRegister(0));
2249       break;
2250     case kS390_Int64ToInt32:
2251       // sign extend
2252       __ lgfr(i.OutputRegister(), i.InputRegister(0));
2253       break;
2254     // Convert Fixed to Floating Point
2255     case kS390_Int64ToFloat32:
2256       __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2257       break;
2258     case kS390_Int64ToDouble:
2259       __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2260       break;
2261     case kS390_Uint64ToFloat32:
2262       __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
2263                                      i.InputRegister(0));
2264       break;
2265     case kS390_Uint64ToDouble:
2266       __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
2267                                       i.InputRegister(0));
2268       break;
2269     case kS390_Int32ToFloat32:
2270       __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2271       break;
2272     case kS390_Int32ToDouble:
2273       __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2274       break;
2275     case kS390_Uint32ToFloat32:
2276       __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
2277                                    i.InputRegister(0));
2278       break;
2279     case kS390_Uint32ToDouble:
2280       __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
2281                                     i.InputRegister(0));
2282       break;
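    // Convert Floating Point to Fixed. The branch mask 0xE is taken for
    // condition codes 0-2 (the "normal case"); only condition code 3, which
    // the convert instructions use to signal a NaN or out-of-range input,
    // falls through. In that case the result is cleared; the 64-bit
    // conversions may also produce a second output that acts as a success
    // flag (preset to 1, cleared instead of the result on failure).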
2283     case kS390_DoubleToInt32: {
2284       Label done;
2285       __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2286                               kRoundToNearest);
2287       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2288       __ lghi(i.OutputRegister(0), Operand::Zero());
2289       __ bind(&done);
2290       break;
2291     }
2292     case kS390_DoubleToUint32: {
2293       Label done;
2294       __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
2295                                       i.InputDoubleRegister(0));
2296       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2297       __ lghi(i.OutputRegister(0), Operand::Zero());
2298       __ bind(&done);
2299       break;
2300     }
2301     case kS390_DoubleToInt64: {
2302       Label done;
2303       if (i.OutputCount() > 1) {
2304         __ lghi(i.OutputRegister(1), Operand(1));
2305       }
2306       __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2307       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2308       if (i.OutputCount() > 1) {
2309         __ lghi(i.OutputRegister(1), Operand::Zero());
2310       } else {
2311         __ lghi(i.OutputRegister(0), Operand::Zero());
2312       }
2313       __ bind(&done);
2314       break;
2315     }
2316     case kS390_DoubleToUint64: {
2317       Label done;
2318       if (i.OutputCount() > 1) {
2319         __ lghi(i.OutputRegister(1), Operand(1));
2320       }
2321       __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
2322                                       i.InputDoubleRegister(0));
2323       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2324       if (i.OutputCount() > 1) {
2325         __ lghi(i.OutputRegister(1), Operand::Zero());
2326       } else {
2327         __ lghi(i.OutputRegister(0), Operand::Zero());
2328       }
2329       __ bind(&done);
2330       break;
2331     }
2332     case kS390_Float32ToInt32: {
2333       Label done;
2334       __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2335                                kRoundToZero);
2336       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2337       __ lghi(i.OutputRegister(0), Operand::Zero());
2338       __ bind(&done);
2339       break;
2340     }
2341     case kS390_Float32ToUint32: {
2342       Label done;
2343       __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
2344                                        i.InputDoubleRegister(0));
2345       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2346       __ lghi(i.OutputRegister(0), Operand::Zero());
2347       __ bind(&done);
2348       break;
2349     }
2350     case kS390_Float32ToUint64: {
2351       Label done;
2352       if (i.OutputCount() > 1) {
2353         __ lghi(i.OutputRegister(1), Operand(1));
2354       }
2355       __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
2356                                        i.InputDoubleRegister(0));
2357       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2358       if (i.OutputCount() > 1) {
2359         __ lghi(i.OutputRegister(1), Operand::Zero());
2360       } else {
2361         __ lghi(i.OutputRegister(0), Operand::Zero());
2362       }
2363       __ bind(&done);
2364       break;
2365     }
2366     case kS390_Float32ToInt64: {
2367       Label done;
2368       if (i.OutputCount() > 1) {
2369         __ lghi(i.OutputRegister(1), Operand(1));
2370       }
2371       __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2372       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2373       if (i.OutputCount() > 1) {
2374         __ lghi(i.OutputRegister(1), Operand::Zero());
2375       } else {
2376         __ lghi(i.OutputRegister(0), Operand::Zero());
2377       }
2378       __ bind(&done);
2379       break;
2380     }
2381     case kS390_DoubleToFloat32:
2382       ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
2383       break;
2384     case kS390_Float32ToDouble:
2385       ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
2386                         nullInstr);
2387       break;
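    // The following cases move raw bit patterns between GPRs and FPRs with
    // lgdr/ldgr and use 64-bit shifts to select or place the requested
    // 32-bit half of the double.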
2388     case kS390_DoubleExtractLowWord32:
2389       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2390       __ llgfr(i.OutputRegister(), i.OutputRegister());
2391       break;
2392     case kS390_DoubleExtractHighWord32:
2393       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2394       __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2395       break;
2396     case kS390_DoubleInsertLowWord32:
2397       __ lgdr(kScratchReg, i.InputDoubleRegister(0));
2398       __ lr(kScratchReg, i.InputRegister(1));
2399       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2400       break;
2401     case kS390_DoubleInsertHighWord32:
2402       __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2403       __ lgdr(r0, i.InputDoubleRegister(0));
2404       __ lr(kScratchReg, r0);
2405       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2406       break;
2407     case kS390_DoubleConstruct:
2408       __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2409       __ lr(kScratchReg, i.InputRegister(1));
2410 
2411       // Bitwise convert from GPR to FPR
2412       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2413       break;
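    // Integer loads. EmitWordLoadPoisoningIfNeeded is expected to mask the
    // loaded value with kSpeculationPoisonRegister when load poisoning is
    // enabled, so that speculatively executed loads cannot leak data.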
2414     case kS390_LoadWordS8:
2415       ASSEMBLE_LOAD_INTEGER(LoadB);
2416       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2417       break;
2418     case kS390_BitcastFloat32ToInt32:
2419       ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
2420       break;
2421     case kS390_BitcastInt32ToFloat32:
2422       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2423       break;
2424 #if V8_TARGET_ARCH_S390X
2425     case kS390_BitcastDoubleToInt64:
2426       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2427       break;
2428     case kS390_BitcastInt64ToDouble:
2429       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2430       break;
2431 #endif
2432     case kS390_LoadWordU8:
2433       ASSEMBLE_LOAD_INTEGER(LoadlB);
2434       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2435       break;
2436     case kS390_LoadWordU16:
2437       ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
2438       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2439       break;
2440     case kS390_LoadWordS16:
2441       ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
2442       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2443       break;
2444     case kS390_LoadWordU32:
2445       ASSEMBLE_LOAD_INTEGER(LoadlW);
2446       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2447       break;
2448     case kS390_LoadWordS32:
2449       ASSEMBLE_LOAD_INTEGER(LoadW);
2450       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2451       break;
2452     case kS390_LoadReverse16:
2453       ASSEMBLE_LOAD_INTEGER(lrvh);
2454       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2455       break;
2456     case kS390_LoadReverse32:
2457       ASSEMBLE_LOAD_INTEGER(lrv);
2458       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2459       break;
2460     case kS390_LoadReverse64:
2461       ASSEMBLE_LOAD_INTEGER(lrvg);
2462       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2463       break;
2464     case kS390_LoadReverse16RR:
2465       __ lrvr(i.OutputRegister(), i.InputRegister(0));
2466       __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2467       break;
2468     case kS390_LoadReverse32RR:
2469       __ lrvr(i.OutputRegister(), i.InputRegister(0));
2470       break;
2471     case kS390_LoadReverse64RR:
2472       __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2473       break;
2474     case kS390_LoadWord64:
2475       ASSEMBLE_LOAD_INTEGER(lg);
2476       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2477       break;
2478     case kS390_LoadAndTestWord32: {
2479       ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2480       break;
2481     }
2482     case kS390_LoadAndTestWord64: {
2483       ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2484       break;
2485     }
2486     case kS390_LoadFloat32:
2487       ASSEMBLE_LOAD_FLOAT(LoadFloat32);
2488       break;
2489     case kS390_LoadDouble:
2490       ASSEMBLE_LOAD_FLOAT(LoadDouble);
2491       break;
2492     case kS390_StoreWord8:
2493       ASSEMBLE_STORE_INTEGER(StoreByte);
2494       break;
2495     case kS390_StoreWord16:
2496       ASSEMBLE_STORE_INTEGER(StoreHalfWord);
2497       break;
2498     case kS390_StoreWord32:
2499       ASSEMBLE_STORE_INTEGER(StoreW);
2500       break;
2501 #if V8_TARGET_ARCH_S390X
2502     case kS390_StoreWord64:
2503       ASSEMBLE_STORE_INTEGER(StoreP);
2504       break;
2505 #endif
2506     case kS390_StoreReverse16:
2507       ASSEMBLE_STORE_INTEGER(strvh);
2508       break;
2509     case kS390_StoreReverse32:
2510       ASSEMBLE_STORE_INTEGER(strv);
2511       break;
2512     case kS390_StoreReverse64:
2513       ASSEMBLE_STORE_INTEGER(strvg);
2514       break;
2515     case kS390_StoreFloat32:
2516       ASSEMBLE_STORE_FLOAT32();
2517       break;
2518     case kS390_StoreDouble:
2519       ASSEMBLE_STORE_DOUBLE();
2520       break;
2521     case kS390_Lay:
2522       __ lay(i.OutputRegister(), i.MemoryOperand());
2523       break;
2524     case kWord32AtomicLoadInt8:
2525       __ LoadB(i.OutputRegister(), i.MemoryOperand());
2526       break;
2527     case kWord32AtomicLoadUint8:
2528       __ LoadlB(i.OutputRegister(), i.MemoryOperand());
2529       break;
2530     case kWord32AtomicLoadInt16:
2531       __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
2532       break;
2533     case kWord32AtomicLoadUint16:
2534       __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
2535       break;
2536     case kWord32AtomicLoadWord32:
2537       __ LoadlW(i.OutputRegister(), i.MemoryOperand());
2538       break;
2539     case kWord32AtomicStoreWord8:
2540       __ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
2541       break;
2542     case kWord32AtomicStoreWord16:
2543       __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
2544       break;
2545     case kWord32AtomicStoreWord32:
2546       __ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
2547       break;
2548 //         0x aa bb cc dd
2549 // index =    3..2..1..0
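// ATOMIC_EXCHANGE implements a sub-word exchange as a compare-and-swap loop:
// it loads the containing aligned word, inserts `value` into the selected
// bit range of a copy (RotateInsertSelectBits), and retries the csy until no
// concurrent writer has modified the word; finally the old lane is shifted
// down into the low bits of `output`.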
2550 #define ATOMIC_EXCHANGE(start, end, shift_amount, offset)                    \
2551   {                                                                          \
2552     Label do_cs;                                                             \
2553     __ LoadlW(output, MemOperand(r1, offset));                               \
2554     __ bind(&do_cs);                                                         \
2555     __ llgfr(r0, output);                                                    \
2556     __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end),       \
2557              Operand(shift_amount), false);                                  \
2558     __ csy(output, r0, MemOperand(r1, offset));                              \
2559     __ bne(&do_cs, Label::kNear);                                            \
2560     __ srl(output, Operand(shift_amount));                                   \
2561   }
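// The BYTE/HALFWORD helpers compute, for lane index `idx`, the bit range
// (start..end, counted within the 64-bit register) that the lane occupies and
// the shift needed to move it to or from the low bits; the mapping from index
// to bit position differs between big- and little-endian targets.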
2562 #ifdef V8_TARGET_BIG_ENDIAN
2563 #define ATOMIC_EXCHANGE_BYTE(i)                                  \
2564   {                                                              \
2565     constexpr int idx = (i);                                     \
2566     static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2567     constexpr int start = 32 + 8 * idx;                          \
2568     constexpr int end = start + 7;                               \
2569     constexpr int shift_amount = (3 - idx) * 8;                  \
2570     ATOMIC_EXCHANGE(start, end, shift_amount, -idx);             \
2571   }
2572 #define ATOMIC_EXCHANGE_HALFWORD(i)                              \
2573   {                                                              \
2574     constexpr int idx = (i);                                     \
2575     static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2576     constexpr int start = 32 + 16 * idx;                         \
2577     constexpr int end = start + 15;                              \
2578     constexpr int shift_amount = (1 - idx) * 16;                 \
2579     ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2);         \
2580   }
2581 #else
2582 #define ATOMIC_EXCHANGE_BYTE(i)                                  \
2583   {                                                              \
2584     constexpr int idx = (i);                                     \
2585     static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2586     constexpr int start = 32 + 8 * (3 - idx);                    \
2587     constexpr int end = start + 7;                               \
2588     constexpr int shift_amount = idx * 8;                        \
2589     ATOMIC_EXCHANGE(start, end, shift_amount, -idx);             \
2590   }
2591 #define ATOMIC_EXCHANGE_HALFWORD(i)                              \
2592   {                                                              \
2593     constexpr int idx = (i);                                     \
2594     static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2595     constexpr int start = 32 + 16 * (1 - idx);                   \
2596     constexpr int end = start + 15;                              \
2597     constexpr int shift_amount = idx * 16;                       \
2598     ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2);         \
2599   }
2600 #endif
2601     case kWord32AtomicExchangeInt8:
2602     case kWord32AtomicExchangeUint8: {
2603       Register base = i.InputRegister(0);
2604       Register index = i.InputRegister(1);
2605       Register value = i.InputRegister(2);
2606       Register output = i.OutputRegister();
2607       Label three, two, one, done;
2608       __ la(r1, MemOperand(base, index));
2609       __ tmll(r1, Operand(3));
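      // tmll tested the two low address bits; the branch masks map the
      // resulting condition codes back to the byte offset within the aligned
      // word: Condition(1) -> 0b11, Condition(2) -> 0b10, Condition(4) -> 0b01,
      // and an aligned address (0b00) falls through.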
2610       __ b(Condition(1), &three);
2611       __ b(Condition(2), &two);
2612       __ b(Condition(4), &one);
2613 
2614       // end with 0b00
2615       ATOMIC_EXCHANGE_BYTE(0);
2616       __ b(&done);
2617 
2618       // ending with 0b01
2619       __ bind(&one);
2620       ATOMIC_EXCHANGE_BYTE(1);
2621       __ b(&done);
2622 
2623       // ending with 0b10
2624       __ bind(&two);
2625       ATOMIC_EXCHANGE_BYTE(2);
2626       __ b(&done);
2627 
2628       // ending with 0b11
2629       __ bind(&three);
2630       ATOMIC_EXCHANGE_BYTE(3);
2631 
2632       __ bind(&done);
2633       if (opcode == kWord32AtomicExchangeInt8) {
2634         __ lbr(output, output);
2635       } else {
2636         __ llcr(output, output);
2637       }
2638       break;
2639     }
2640     case kWord32AtomicExchangeInt16:
2641     case kWord32AtomicExchangeUint16: {
2642       Register base = i.InputRegister(0);
2643       Register index = i.InputRegister(1);
2644       Register value = i.InputRegister(2);
2645       Register output = i.OutputRegister();
2646       Label two, unaligned, done;
2647       __ la(r1, MemOperand(base, index));
2648       __ tmll(r1, Operand(3));
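      // Halfword accesses are assumed to be 2-byte aligned here, so only the
      // 0b00 and 0b10 address endings need to be handled.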
2649       __ b(Condition(2), &two);
2650 
2651       // end with 0b00
2652       ATOMIC_EXCHANGE_HALFWORD(0);
2653       __ b(&done);
2654 
2655       // ending with 0b10
2656       __ bind(&two);
2657       ATOMIC_EXCHANGE_HALFWORD(1);
2658 
2659       __ bind(&done);
2660       if (opcode == kWord32AtomicExchangeInt16) {
2661         __ lhr(output, output);
2662       } else {
2663         __ llhr(output, output);
2664       }
2665       break;
2666     }
2667     case kWord32AtomicExchangeWord32: {
2668       Register base = i.InputRegister(0);
2669       Register index = i.InputRegister(1);
2670       Register value = i.InputRegister(2);
2671       Register output = i.OutputRegister();
2672       Label do_cs;
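      // Word-sized exchange: cs compares `output` with memory and stores
      // `value` on a match; on a concurrent modification `output` is reloaded
      // with the current contents and the loop retries.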
2673       __ lay(r1, MemOperand(base, index));
2674       __ LoadlW(output, MemOperand(r1));
2675       __ bind(&do_cs);
2676       __ cs(output, value, MemOperand(r1));
2677       __ bne(&do_cs, Label::kNear);
2678       break;
2679     }
2680     case kWord32AtomicCompareExchangeInt8:
2681       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadB);
2682       break;
2683     case kWord32AtomicCompareExchangeUint8:
2684       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadlB);
2685       break;
2686     case kWord32AtomicCompareExchangeInt16:
2687       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadHalfWordP);
2688       break;
2689     case kWord32AtomicCompareExchangeUint16:
2690       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
2691       break;
2692     case kWord32AtomicCompareExchangeWord32:
2693       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
2694       break;
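// ATOMIC_BINOP_CASE supplies ASSEMBLE_ATOMIC_BINOP_BYTE/HALFWORD with a
// lambda that extracts the previous lane value from the old word (`prev`,
// `result` and `shift_amount` are bound by those macros): a shift plus
// LoadB/LoadHalfWordP for the signed variants, or a zero-extending
// RotateInsertSelectBits for the unsigned ones.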
2695 #define ATOMIC_BINOP_CASE(op, inst)                                     \
2696   case kWord32Atomic##op##Int8:                                         \
2697     ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() {                            \
2698           intptr_t shift_right = static_cast<intptr_t>(shift_amount);   \
2699           __ srlk(result, prev, Operand(shift_right));                  \
2700           __ LoadB(result, result);                                     \
2701         });                                                             \
2702     break;                                                              \
2703   case kWord32Atomic##op##Uint8:                                        \
2704     ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() {                            \
2705           int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount;  \
2706           __ RotateInsertSelectBits(result, prev, Operand(56),          \
2707               Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
2708               true);                                                    \
2709         });                                                             \
2710     break;                                                              \
2711   case kWord32Atomic##op##Int16:                                        \
2712     ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() {                        \
2713           intptr_t shift_right = static_cast<intptr_t>(shift_amount);   \
2714           __ srlk(result, prev, Operand(shift_right));                  \
2715           __ LoadHalfWordP(result, result);                             \
2716         });                                                             \
2717     break;                                                              \
2718   case kWord32Atomic##op##Uint16:                                       \
2719     ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() {                        \
2720           int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount;  \
2721           __ RotateInsertSelectBits(result, prev, Operand(48),          \
2722               Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
2723               true);                                                    \
2724         });                                                             \
2725     break;
2726       ATOMIC_BINOP_CASE(Add, Add32)
2727       ATOMIC_BINOP_CASE(Sub, Sub32)
2728       ATOMIC_BINOP_CASE(And, And)
2729       ATOMIC_BINOP_CASE(Or, Or)
2730       ATOMIC_BINOP_CASE(Xor, Xor)
2731 #undef ATOMIC_BINOP_CASE
2732     case kWord32AtomicAddWord32:
2733       ASSEMBLE_ATOMIC_BINOP_WORD(laa);
2734       break;
2735     case kWord32AtomicSubWord32:
2736       ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
2737       break;
2738     case kWord32AtomicAndWord32:
2739       ASSEMBLE_ATOMIC_BINOP_WORD(lan);
2740       break;
2741     case kWord32AtomicOrWord32:
2742       ASSEMBLE_ATOMIC_BINOP_WORD(lao);
2743       break;
2744     case kWord32AtomicXorWord32:
2745       ASSEMBLE_ATOMIC_BINOP_WORD(lax);
2746       break;
2747     default:
2748       UNREACHABLE();
2749       break;
2750   }
2751   return kSuccess;
2752 }  // NOLINT(readability/fn_size)
2753 
2754 // Assembles branches after an instruction.
2755 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
2756   S390OperandConverter i(this, instr);
2757   Label* tlabel = branch->true_label;
2758   Label* flabel = branch->false_label;
2759   ArchOpcode op = instr->arch_opcode();
2760   FlagsCondition condition = branch->condition;
2761 
2762   Condition cond = FlagsConditionToCondition(condition, op);
2763   if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
2764     // check for unordered if necessary
2765     // Branching to flabel/tlabel according to what's expected by tests
2766     if (cond == le || cond == eq || cond == lt) {
2767       __ bunordered(flabel);
2768     } else if (cond == gt || cond == ne || cond == ge) {
2769       __ bunordered(tlabel);
2770     }
2771   }
2772   __ b(cond, tlabel);
2773   if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
2774 }
2775 
2776 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
2777                                             Instruction* instr) {
2778   // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
2779   if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
2780     return;
2781   }
2782 
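  // Conditionally load zero into the speculation poison register: if the
  // flags indicate this code is being reached along a mispredicted path, the
  // poison register becomes zero and later poisoned loads yield zero.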
2783   condition = NegateFlagsCondition(condition);
2784   __ LoadImmP(r0, Operand::Zero());
2785   __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
2786                       kSpeculationPoisonRegister, r0);
2787 }
2788 
2789 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
2790                                             BranchInfo* branch) {
2791   AssembleArchBranch(instr, branch);
2792 }
2793 
2794 void CodeGenerator::AssembleArchJump(RpoNumber target) {
2795   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
2796 }
2797 
2798 void CodeGenerator::AssembleArchTrap(Instruction* instr,
2799                                      FlagsCondition condition) {
2800   class OutOfLineTrap final : public OutOfLineCode {
2801    public:
2802     OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
2803         : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
2804 
2805     void Generate() final {
2806       S390OperandConverter i(gen_, instr_);
2807       TrapId trap_id =
2808           static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
2809       GenerateCallToTrap(trap_id);
2810     }
2811 
2812    private:
2813     void GenerateCallToTrap(TrapId trap_id) {
2814       if (trap_id == TrapId::kInvalid) {
2815         // We cannot test calls to the runtime in cctest/test-run-wasm.
2816         // Therefore we emit a call to C here instead of a call to the runtime.
2817         // We use the context register as the scratch register, because we do
2818         // not have a context here.
2819         __ PrepareCallCFunction(0, 0, cp);
2820         __ CallCFunction(
2821             ExternalReference::wasm_call_trap_callback_for_testing(), 0);
2822         __ LeaveFrame(StackFrame::WASM_COMPILED);
2823         auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
2824         int pop_count =
2825             static_cast<int>(call_descriptor->StackParameterCount());
2826         __ Drop(pop_count);
2827         __ Ret();
2828       } else {
2829         gen_->AssembleSourcePosition(instr_);
2830         // A direct call to a wasm runtime stub defined in this module.
2831         // Just encode the stub index. This will be patched at relocation.
2832         __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
2833         ReferenceMap* reference_map =
2834             new (gen_->zone()) ReferenceMap(gen_->zone());
2835         gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
2836                               Safepoint::kNoLazyDeopt);
2837         if (FLAG_debug_code) {
2838           __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
2839         }
2840       }
2841     }
2842 
2843     Instruction* instr_;
2844     CodeGenerator* gen_;
2845   };
2846   auto ool = new (zone()) OutOfLineTrap(this, instr);
2847   Label* tlabel = ool->entry();
2848   Label end;
2849 
2850   ArchOpcode op = instr->arch_opcode();
2851   Condition cond = FlagsConditionToCondition(condition, op);
2852   if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
2853     // check for unordered if necessary
2854     if (cond == le || cond == eq || cond == lt) {
2855       __ bunordered(&end);
2856     } else if (cond == gt || cond == ne || cond == ge) {
2857       __ bunordered(tlabel);
2858     }
2859   }
2860   __ b(cond, tlabel);
2861   __ bind(&end);
2862 }
2863 
2864 // Assembles boolean materializations after an instruction.
2865 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
2866                                         FlagsCondition condition) {
2867   S390OperandConverter i(this, instr);
2868   ArchOpcode op = instr->arch_opcode();
2869   bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
2870 
2871   // Overflow checked for add/sub only.
2872   DCHECK((condition != kOverflow && condition != kNotOverflow) ||
2873          (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
2874           op == kS390_Sub64 || op == kS390_Mul32));
2875 
2876   // Materialize a full 32-bit 1 or 0 value. The result register is always the
2877   // last output of the instruction.
2878   DCHECK_NE(0u, instr->OutputCount());
2879   Register reg = i.OutputRegister(instr->OutputCount() - 1);
2880   Condition cond = FlagsConditionToCondition(condition, op);
2881   Label done;
2882   if (check_unordered) {
2883     __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
2884                                                               : Operand(1));
2885     __ bunordered(&done);
2886   }
2887 
2888   // TODO(john.yan): use load imm high on condition here
2889   __ LoadImmP(reg, Operand::Zero());
2890   __ LoadImmP(kScratchReg, Operand(1));
2891   // locr is sufficient since reg's upper 32 bits are guaranteed to be 0
2892   __ locr(cond, reg, kScratchReg);
2893   __ bind(&done);
2894 }
2895 
2896 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
2897   S390OperandConverter i(this, instr);
2898   Register input = i.InputRegister(0);
2899   std::vector<std::pair<int32_t, Label*>> cases;
2900   for (size_t index = 2; index < instr->InputCount(); index += 2) {
2901     cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
2902   }
2903   AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
2904                                       cases.data() + cases.size());
2905 }
2906 
2907 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
2908   S390OperandConverter i(this, instr);
2909   Register input = i.InputRegister(0);
2910   for (size_t index = 2; index < instr->InputCount(); index += 2) {
2911     __ Cmp32(input, Operand(i.InputInt32(index + 0)));
2912     __ beq(GetLabel(i.InputRpo(index + 1)));
2913   }
2914   AssembleArchJump(i.InputRpo(1));
2915 }
2916 
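// Emits a bounds check against the number of cases (out-of-range values jump
// to the default target in input 1), followed by an indirect jump through a
// table of absolute label addresses emitted by AssembleJumpTable below.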
2917 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
2918   S390OperandConverter i(this, instr);
2919   Register input = i.InputRegister(0);
2920   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
2921   Label** cases = zone()->NewArray<Label*>(case_count);
2922   for (int32_t index = 0; index < case_count; ++index) {
2923     cases[index] = GetLabel(i.InputRpo(index + 2));
2924   }
2925   Label* const table = AddJumpTable(cases, case_count);
2926   __ CmpLogicalP(input, Operand(case_count));
2927   __ bge(GetLabel(i.InputRpo(1)));
2928   __ larl(kScratchReg, table);
2929   __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
2930   __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
2931   __ Jump(kScratchReg);
2932 }
2933 
2934 void CodeGenerator::FinishFrame(Frame* frame) {
2935   auto call_descriptor = linkage()->GetIncomingDescriptor();
2936   const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
2937 
2938   // Save callee-saved Double registers.
2939   if (double_saves != 0) {
2940     frame->AlignSavedCalleeRegisterSlots();
2941     DCHECK_EQ(kNumCalleeSavedDoubles,
2942               base::bits::CountPopulation(double_saves));
2943     frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
2944                                             (kDoubleSize / kPointerSize));
2945   }
2946   // Save callee-saved registers.
2947   const RegList saves = call_descriptor->CalleeSavedRegisters();
2948   if (saves != 0) {
2949     // register save area does not include the fp or constant pool pointer.
2950     const int num_saves = kNumCalleeSaved - 1;
2951     DCHECK(num_saves == base::bits::CountPopulation(saves));
2952     frame->AllocateSavedCalleeRegisterSlots(num_saves);
2953   }
2954 }
2955 
2956 void CodeGenerator::AssembleConstructFrame() {
2957   auto call_descriptor = linkage()->GetIncomingDescriptor();
2958 
2959   if (frame_access_state()->has_frame()) {
2960     if (call_descriptor->IsCFunctionCall()) {
2961       __ Push(r14, fp);
2962       __ LoadRR(fp, sp);
2963     } else if (call_descriptor->IsJSFunctionCall()) {
2964       __ Prologue(ip);
2965       if (call_descriptor->PushArgumentCount()) {
2966         __ Push(kJavaScriptCallArgCountRegister);
2967       }
2968     } else {
2969       StackFrame::Type type = info()->GetOutputStackFrameType();
2970       // TODO(mbrandy): Detect cases where ip is the entrypoint (for
2971       // efficient initialization of the constant pool pointer register).
2972       __ StubPrologue(type);
2973       if (call_descriptor->IsWasmFunctionCall()) {
2974         __ Push(kWasmInstanceRegister);
2975       }
2976     }
2977   }
2978 
2979   int shrink_slots = frame()->GetTotalFrameSlotCount() -
2980                      call_descriptor->CalculateFixedFrameSize();
2981   if (info()->is_osr()) {
2982     // TurboFan OSR-compiled functions cannot be entered directly.
2983     __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
2984 
2985     // Unoptimized code jumps directly to this entrypoint while the unoptimized
2986     // frame is still on the stack. Optimized code uses OSR values directly from
2987     // the unoptimized frame. Thus, all that needs to be done is to allocate the
2988     // remaining stack slots.
2989     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
2990     osr_pc_offset_ = __ pc_offset();
2991     shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
2992     ResetSpeculationPoison();
2993   }
2994 
2995   const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
2996   const RegList saves = call_descriptor->CalleeSavedRegisters();
2997 
2998   if (shrink_slots > 0) {
2999     if (info()->IsWasm() && shrink_slots > 128) {
3000       // For WebAssembly functions with big frames we have to do the stack
3001       // overflow check before we construct the frame. Otherwise we may not
3002       // have enough space on the stack to call the runtime for the stack
3003       // overflow.
3004       Label done;
3005 
3006       // If the frame is bigger than the stack, we throw the stack overflow
3007       // exception unconditionally. Thereby we can avoid the integer overflow
3008       // check in the condition code.
3009       if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
3010         Register scratch = r1;
3011         __ LoadP(scratch, FieldMemOperand(
3012                             kWasmInstanceRegister,
3013                             WasmInstanceObject::kRealStackLimitAddressOffset));
3014         __ LoadP(scratch, MemOperand(scratch));
3015         __ AddP(scratch, scratch, Operand(shrink_slots * kPointerSize));
3016         __ CmpLogicalP(sp, scratch);
3017         __ bge(&done);
3018       }
3019 
3020       __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
3021                                    WasmInstanceObject::kCEntryStubOffset));
3022       __ Move(cp, Smi::kZero);
3023       __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r4);
3024       // We come from WebAssembly, there are no references for the GC.
3025       ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
3026       RecordSafepoint(reference_map, Safepoint::kSimple, 0,
3027                       Safepoint::kNoLazyDeopt);
3028       if (FLAG_debug_code) {
3029         __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
3030       }
3031 
3032       __ bind(&done);
3033     }
3034 
3035     // Skip callee-saved and return slots, which are pushed below.
3036     shrink_slots -= base::bits::CountPopulation(saves);
3037     shrink_slots -= frame()->GetReturnSlotCount();
3038     shrink_slots -=
3039         (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
3040     __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
3041   }
3042 
3043   // Save callee-saved Double registers.
3044   if (saves_fp != 0) {
3045     __ MultiPushDoubles(saves_fp);
3046     DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
3047   }
3048 
3049   // Save callee-saved registers.
3050   if (saves != 0) {
3051     __ MultiPush(saves);
3052     // register save area does not include the fp or constant pool pointer.
3053   }
3054 
3055   const int returns = frame()->GetReturnSlotCount();
3056   if (returns != 0) {
3057     // Create space for returns.
3058     __ lay(sp, MemOperand(sp, -returns * kPointerSize));
3059   }
3060 }
3061 
3062 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
3063   auto call_descriptor = linkage()->GetIncomingDescriptor();
3064   int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
3065 
3066   const int returns = frame()->GetReturnSlotCount();
3067   if (returns != 0) {
3068     // Create space for returns.
3069     __ lay(sp, MemOperand(sp, returns * kPointerSize));
3070   }
3071 
3072   // Restore registers.
3073   const RegList saves = call_descriptor->CalleeSavedRegisters();
3074   if (saves != 0) {
3075     __ MultiPop(saves);
3076   }
3077 
3078   // Restore double registers.
3079   const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3080   if (double_saves != 0) {
3081     __ MultiPopDoubles(double_saves);
3082   }
3083 
3084   S390OperandConverter g(this, nullptr);
3085   if (call_descriptor->IsCFunctionCall()) {
3086     AssembleDeconstructFrame();
3087   } else if (frame_access_state()->has_frame()) {
3088     // Canonicalize JSFunction return sites for now unless they have a
3089     // variable number of stack slot pops.
3090     if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
3091       if (return_label_.is_bound()) {
3092         __ b(&return_label_);
3093         return;
3094       } else {
3095         __ bind(&return_label_);
3096         AssembleDeconstructFrame();
3097       }
3098     } else {
3099       AssembleDeconstructFrame();
3100     }
3101   }
3102   if (pop->IsImmediate()) {
3103     pop_count += g.ToConstant(pop).ToInt32();
3104   } else {
3105     __ Drop(g.ToRegister(pop));
3106   }
3107   __ Drop(pop_count);
3108   __ Ret();
3109 }
3110 
3111 void CodeGenerator::FinishCode() {}
3112 
3113 void CodeGenerator::AssembleMove(InstructionOperand* source,
3114                                  InstructionOperand* destination) {
3115   S390OperandConverter g(this, nullptr);
3116   // Dispatch on the source and destination operand kinds.  Not all
3117   // combinations are possible.
3118   if (source->IsRegister()) {
3119     DCHECK(destination->IsRegister() || destination->IsStackSlot());
3120     Register src = g.ToRegister(source);
3121     if (destination->IsRegister()) {
3122       __ Move(g.ToRegister(destination), src);
3123     } else {
3124       __ StoreP(src, g.ToMemOperand(destination));
3125     }
3126   } else if (source->IsStackSlot()) {
3127     DCHECK(destination->IsRegister() || destination->IsStackSlot());
3128     MemOperand src = g.ToMemOperand(source);
3129     if (destination->IsRegister()) {
3130       __ LoadP(g.ToRegister(destination), src);
3131     } else {
3132       Register temp = kScratchReg;
3133       __ LoadP(temp, src, r0);
3134       __ StoreP(temp, g.ToMemOperand(destination));
3135     }
3136   } else if (source->IsConstant()) {
3137     Constant src = g.ToConstant(source);
3138     if (destination->IsRegister() || destination->IsStackSlot()) {
3139       Register dst =
3140           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
3141       switch (src.type()) {
3142         case Constant::kInt32:
3143 #if V8_TARGET_ARCH_S390X
3144           if (false) {
3145 #else
3146           if (RelocInfo::IsWasmReference(src.rmode())) {
3147 #endif
3148             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
3149           } else {
3150             __ Load(dst, Operand(src.ToInt32()));
3151           }
3152           break;
3153         case Constant::kInt64:
3154 #if V8_TARGET_ARCH_S390X
3155           if (RelocInfo::IsWasmPtrReference(src.rmode())) {
3156             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
3157           } else {
3158             __ Load(dst, Operand(src.ToInt64()));
3159           }
3160 #else
3161           __ mov(dst, Operand(src.ToInt64()));
3162 #endif  // V8_TARGET_ARCH_S390X
3163           break;
3164         case Constant::kFloat32:
3165           __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3166           break;
3167         case Constant::kFloat64:
3168           __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3169           break;
3170         case Constant::kExternalReference:
3171           __ Move(dst, src.ToExternalReference());
3172           break;
3173         case Constant::kHeapObject: {
3174           Handle<HeapObject> src_object = src.ToHeapObject();
3175           Heap::RootListIndex index;
3176           if (IsMaterializableFromRoot(src_object, &index)) {
3177             __ LoadRoot(dst, index);
3178           } else {
3179             __ Move(dst, src_object);
3180           }
3181           break;
3182         }
3183         case Constant::kRpoNumber:
3184           UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
3185           break;
3186       }
3187       if (destination->IsStackSlot()) {
3188         __ StoreP(dst, g.ToMemOperand(destination), r0);
3189       }
3190     } else {
3191       DoubleRegister dst = destination->IsFPRegister()
3192                                ? g.ToDoubleRegister(destination)
3193                                : kScratchDoubleReg;
3194       double value = (src.type() == Constant::kFloat32)
3195                          ? src.ToFloat32()
3196                          : src.ToFloat64().value();
3197       if (src.type() == Constant::kFloat32) {
3198         __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
3199       } else {
3200         __ LoadDoubleLiteral(dst, value, kScratchReg);
3201       }
3202 
3203       if (destination->IsFloatStackSlot()) {
3204         __ StoreFloat32(dst, g.ToMemOperand(destination));
3205       } else if (destination->IsDoubleStackSlot()) {
3206         __ StoreDouble(dst, g.ToMemOperand(destination));
3207       }
3208     }
3209   } else if (source->IsFPRegister()) {
3210     DoubleRegister src = g.ToDoubleRegister(source);
3211     if (destination->IsFPRegister()) {
3212       DoubleRegister dst = g.ToDoubleRegister(destination);
3213       __ Move(dst, src);
3214     } else {
3215       DCHECK(destination->IsFPStackSlot());
3216       LocationOperand* op = LocationOperand::cast(source);
3217       if (op->representation() == MachineRepresentation::kFloat64) {
3218         __ StoreDouble(src, g.ToMemOperand(destination));
3219       } else {
3220         __ StoreFloat32(src, g.ToMemOperand(destination));
3221       }
3222     }
3223   } else if (source->IsFPStackSlot()) {
3224     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3225     MemOperand src = g.ToMemOperand(source);
3226     if (destination->IsFPRegister()) {
3227       LocationOperand* op = LocationOperand::cast(source);
3228       if (op->representation() == MachineRepresentation::kFloat64) {
3229         __ LoadDouble(g.ToDoubleRegister(destination), src);
3230       } else {
3231         __ LoadFloat32(g.ToDoubleRegister(destination), src);
3232       }
3233     } else {
3234       LocationOperand* op = LocationOperand::cast(source);
3235       DoubleRegister temp = kScratchDoubleReg;
3236       if (op->representation() == MachineRepresentation::kFloat64) {
3237         __ LoadDouble(temp, src);
3238         __ StoreDouble(temp, g.ToMemOperand(destination));
3239       } else {
3240         __ LoadFloat32(temp, src);
3241         __ StoreFloat32(temp, g.ToMemOperand(destination));
3242       }
3243     }
3244   } else {
3245     UNREACHABLE();
3246   }
3247 }
3248 
3249 // Swaps the contents of source and destination.
3250 // source and destination could be:
3251 //   Register,
3252 //   FloatRegister,
3253 //   DoubleRegister,
3254 //   StackSlot,
3255 //   FloatStackSlot,
3256 //   or DoubleStackSlot
3257 void CodeGenerator::AssembleSwap(InstructionOperand* source,
3258                                  InstructionOperand* destination) {
3259   S390OperandConverter g(this, nullptr);
3260   if (source->IsRegister()) {
3261     Register src = g.ToRegister(source);
3262     if (destination->IsRegister()) {
3263       __ SwapP(src, g.ToRegister(destination), kScratchReg);
3264     } else {
3265       DCHECK(destination->IsStackSlot());
3266       __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
3267     }
3268   } else if (source->IsStackSlot()) {
3269     DCHECK(destination->IsStackSlot());
3270     __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
3271              r0);
3272   } else if (source->IsFloatRegister()) {
3273     DoubleRegister src = g.ToDoubleRegister(source);
3274     if (destination->IsFloatRegister()) {
3275       __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3276     } else {
3277       DCHECK(destination->IsFloatStackSlot());
3278       __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
3279     }
3280   } else if (source->IsDoubleRegister()) {
3281     DoubleRegister src = g.ToDoubleRegister(source);
3282     if (destination->IsDoubleRegister()) {
3283       __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3284     } else {
3285       DCHECK(destination->IsDoubleStackSlot());
3286       __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
3287     }
3288   } else if (source->IsFloatStackSlot()) {
3289     DCHECK(destination->IsFloatStackSlot());
3290     __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
3291                    kScratchDoubleReg, d0);
3292   } else if (source->IsDoubleStackSlot()) {
3293     DCHECK(destination->IsDoubleStackSlot());
3294     __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
3295                   kScratchDoubleReg, d0);
3296   } else if (source->IsSimd128Register()) {
3297     UNREACHABLE();
3298   } else {
3299     UNREACHABLE();
3300   }
3301 }
3302 
3303 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
3304   for (size_t index = 0; index < target_count; ++index) {
3305     __ emit_label_addr(targets[index]);
3306   }
3307 }
3308 
3309 
3310 #undef __
3311 
3312 }  // namespace compiler
3313 }  // namespace internal
3314 }  // namespace v8
3315