// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {
namespace compiler {

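// Convenience shorthand: emit instructions through the code generator's
// assembler, and name the general-purpose scratch register used throughout
// this file.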
#define __ tasm()->

#define kScratchReg ip

// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  bool Is64BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord64;
  }

  bool Is32BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord32;
  }

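  // Whether this instruction's comparison must use the logical (unsigned)
  // compare instruction rather than the signed one.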
  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
  }

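  // Builds a MemOperand from the instruction's inputs according to the
  // addressing mode encoded in the opcode, advancing *first_index past the
  // inputs that were consumed.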
  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MR:
        *first_index += 1;
        return MemOperand(InputRegister(index + 0), 0);
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
      case kMode_MRRI:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          InputInt32(index + 2));
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode = nullptr,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }

  MemOperand InputStackSlot(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand InputStackSlot32(size_t index) {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
    // We want to read the 32 bits directly from memory, so on 64-bit
    // big-endian targets the slot address must be biased by 4 bytes.
    MemOperand mem = InputStackSlot(index);
    return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
#else
    return InputStackSlot(index);
#endif
  }
};

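// Helpers for inspecting the shape of an instruction's operands.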
static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
}

static inline bool HasFPRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsFPRegister();
}

static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister() ||
         HasFPRegisterInput(instr, index);
}

static inline bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}

static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsFPStackSlot();
}

static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsStackSlot() ||
         HasFPStackSlotInput(instr, index);
}

namespace {

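// Out-of-line code that performs the write barrier after a store to a tagged
// field: it skips Smis and values on uninteresting pages, then calls the
// record-write stub (or the ephemeron-key barrier).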
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (COMPRESS_POINTERS_BOOL) {
      __ DecompressTaggedPointer(value_, value_);
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    if (offset_ == no_reg) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
    }
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_ == no_reg.
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode stub_mode_;
  bool must_save_lr_;
  UnwindingInfoWriter* const unwinding_info_writer_;
  Zone* zone_;
};

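// Translates a platform-independent FlagsCondition into an S390 condition
// code, taking the emitting opcode into account (load-and-test and the
// overflow-setting arithmetic ops need special handling).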
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kUnsignedLessThan:
      // An unsigned number is never less than 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_NOP;
      V8_FALLTHROUGH;
    case kSignedLessThan:
      return lt;
    case kUnsignedGreaterThanOrEqual:
      // An unsigned number is always greater than or equal to 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_ALWAYS;
      V8_FALLTHROUGH;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kUnsignedLessThanOrEqual:
      // An unsigned number is less than or equal to 0 only if it is 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_EQ;
      V8_FALLTHROUGH;
    case kSignedLessThanOrEqual:
      return le;
    case kUnsignedGreaterThan:
      // An unsigned number is greater than 0 unless it is 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return ne;
      V8_FALLTHROUGH;
    case kSignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow is checked only for the arithmetic ops listed below.
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return overflow;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return nooverflow;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
}

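// The macros below wrap a single instruction emission in a lambda for a
// particular operand shape (RR = reg/reg, RI = reg/imm, RM = reg/mem, and so
// on) and return the index of the first input that was not consumed, so that
// callers such as ASSEMBLE_BIN32_OP can locate trailing inputs like the
// zero-extension flag.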
#define GET_MEMOPERAND32(ret, fi) \
  ([&](int& ret) { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0); \
    if (mode != kMode_None) { \
      size_t first_index = (fi); \
      mem = i.MemoryOperand(&mode, &first_index); \
      ret = first_index; \
    } else { \
      mem = i.InputStackSlot32(fi); \
    } \
    return mem; \
  })(ret)

#define GET_MEMOPERAND(ret, fi) \
  ([&](int& ret) { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0); \
    if (mode != kMode_None) { \
      size_t first_index = (fi); \
      mem = i.MemoryOperand(&mode, &first_index); \
      ret = first_index; \
    } else { \
      mem = i.InputStackSlot(fi); \
    } \
    return mem; \
  })(ret)

#define RRInstr(instr) \
  [&]() { \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputRegister(1)); \
    return 2; \
  }
#define RIInstr(instr) \
  [&]() { \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputImmediate(1)); \
    return 2; \
  }
#define RMInstr(instr, GETMEM) \
  [&]() { \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    int ret = 2; \
    __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
    return ret; \
  }
#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)

#define RRRInstr(instr) \
  [&]() { \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
    return 2; \
  }
#define RRIInstr(instr) \
  [&]() { \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
    return 2; \
  }
#define RRMInstr(instr, GETMEM) \
  [&]() { \
    int ret = 2; \
    __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
    return ret; \
  }
#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)

#define DDInstr(instr) \
  [&]() { \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
    return 2; \
  }

#define DMInstr(instr) \
  [&]() { \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2; \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
    return ret; \
  }

#define DMTInstr(instr) \
  [&]() { \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2; \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
             kScratchDoubleReg); \
    return ret; \
  }

#define R_MInstr(instr) \
  [&]() { \
    int ret = 2; \
    __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret; \
  }

#define R_DInstr(instr) \
  [&]() { \
    __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
    return 2; \
  }

#define D_DInstr(instr) \
  [&]() { \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    return 2; \
  }

#define D_MInstr(instr) \
  [&]() { \
    int ret = 2; \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret; \
  }

#define D_MTInstr(instr) \
  [&]() { \
    int ret = 2; \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
             kScratchDoubleReg); \
    return ret; \
  }

static int nullInstr() { UNREACHABLE(); }

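// Picks the register, memory, or immediate variant based on the addressing
// mode and the shape of the instruction's last operand-carrying input.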
template <int numOfOperand, class RType, class MType, class IType>
static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
    return m();
  } else if (HasRegisterInput(instr, numOfOperand - 1)) {
    return r();
  } else if (HasImmediateInput(instr, numOfOperand - 1)) {
    return i();
  } else {
    UNREACHABLE();
  }
}

template <class _RR, class _RM, class _RI>
static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
  return AssembleOp<2>(instr, _rr, _rm, _ri);
}

template <class _R, class _M, class _I>
static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
  return AssembleOp<1>(instr, _r, _m, _i);
}

#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)

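// On 64-bit targets, 32-bit operations carry a trailing immediate input that
// says whether the upper half of the 64-bit result register must be cleared;
// `num` is the index of that input, as returned by AssembleBinOp.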
#ifdef V8_TARGET_ARCH_S390X
#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
  ([&](int index) { \
    DCHECK(HasImmediateInput(instr, (index))); \
    int doZeroExt = i.InputInt32(index); \
    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
  })(num)

#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
  { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
#else
#define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
#define CHECK_AND_ZERO_EXT_OUTPUT(num)
#endif

}  // namespace

#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
  do { \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
  do { \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1)); \
  } while (0)

#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
  do { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), operand); \
      } else { \
        __ cmp_instr(i.InputRegister(0), operand); \
      } \
    } else if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
      } \
    } else if (HasImmediateInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } \
    } else { \
      DCHECK(HasStackSlotInput(instr, 1)); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1)); \
      } \
    } \
  } while (0)

#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr) \
  do { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), operand); \
      } else { \
        __ cmp_instr(i.InputRegister(0), operand); \
      } \
    } else if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
      } \
    } else if (HasImmediateInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } \
    } else { \
      DCHECK(HasStackSlotInput(instr, 1)); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
      } \
    } \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr) \
  do { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
    } else if (HasFPRegisterInput(instr, 1)) { \
      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else { \
      USE(HasFPStackSlotInput); \
      DCHECK(HasFPStackSlotInput(instr, 1)); \
      MemOperand operand = i.InputStackSlot(1); \
      if (operand.offset() >= 0) { \
        __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
      } else { \
        __ load_instr(kScratchDoubleReg, operand); \
        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg); \
      } \
    } \
  } while (0)

// The divide instruction dr implicitly uses the register pair r0:r1 below:
// the dividend in r0:r1 is divided by the divisor, leaving the quotient in
// r1 and the remainder in r0. The remainder is then copied to the output
// register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do { \
    __ LoadRR(r0, i.InputRegister(0)); \
    __ shift_instr(r0, Operand(32)); \
    __ div_instr(r0, i.InputRegister(1)); \
    __ LoadlW(i.OutputRegister(), r0); \
  } while (0)

#define ASSEMBLE_FLOAT_MODULO() \
  do { \
    FrameScope scope(tasm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name) \
  do { \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */ \
    FrameScope scope(tasm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 1, kScratchReg); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result into the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name) \
  do { \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */ \
    FrameScope scope(tasm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result into the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

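// Float/double max and min cannot be a single compare-and-branch because NaN
// and signed zero need explicit handling; the sequences below cover those
// cases and otherwise return the larger (or smaller) input.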
#define ASSEMBLE_DOUBLE_MAX() \
  do { \
    DoubleRegister left_reg = i.InputDoubleRegister(0); \
    DoubleRegister right_reg = i.InputDoubleRegister(1); \
    DoubleRegister result_reg = i.OutputDoubleRegister(); \
    Label check_zero, return_left, return_right, return_nan, done; \
    __ cdbr(left_reg, right_reg); \
    __ bunordered(&return_nan, Label::kNear); \
    __ beq(&check_zero); \
    __ bge(&return_left, Label::kNear); \
    __ b(&return_right, Label::kNear); \
    \
    __ bind(&check_zero); \
    __ lzdr(kDoubleRegZero); \
    __ cdbr(left_reg, kDoubleRegZero); \
    /* left == right != 0. */ \
    __ bne(&return_left, Label::kNear); \
    /* At this point, both left and right are either 0 or -0. */ \
    /* N.B. The following works because +0 + -0 == +0 */ \
    /* For max we want logical-and of sign bit: (L + R) */ \
    __ ldr(result_reg, left_reg); \
    __ adbr(result_reg, right_reg); \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_nan); \
    /* If left or right are NaN, adbr propagates the appropriate one.*/ \
    __ adbr(left_reg, right_reg); \
    __ b(&return_left, Label::kNear); \
    \
    __ bind(&return_right); \
    if (right_reg != result_reg) { \
      __ ldr(result_reg, right_reg); \
    } \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_left); \
    if (left_reg != result_reg) { \
      __ ldr(result_reg, left_reg); \
    } \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_DOUBLE_MIN() \
  do { \
    DoubleRegister left_reg = i.InputDoubleRegister(0); \
    DoubleRegister right_reg = i.InputDoubleRegister(1); \
    DoubleRegister result_reg = i.OutputDoubleRegister(); \
    Label check_zero, return_left, return_right, return_nan, done; \
    __ cdbr(left_reg, right_reg); \
    __ bunordered(&return_nan, Label::kNear); \
    __ beq(&check_zero); \
    __ ble(&return_left, Label::kNear); \
    __ b(&return_right, Label::kNear); \
    \
    __ bind(&check_zero); \
    __ lzdr(kDoubleRegZero); \
    __ cdbr(left_reg, kDoubleRegZero); \
    /* left == right != 0. */ \
    __ bne(&return_left, Label::kNear); \
    /* At this point, both left and right are either 0 or -0. */ \
    /* N.B. The following works because +0 + -0 == +0 */ \
    /* For min we want logical-or of sign bit: -(-L + -R) */ \
    __ lcdbr(left_reg, left_reg); \
    __ ldr(result_reg, left_reg); \
    if (left_reg == right_reg) { \
      __ adbr(result_reg, right_reg); \
    } else { \
      __ sdbr(result_reg, right_reg); \
    } \
    __ lcdbr(result_reg, result_reg); \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_nan); \
    /* If left or right are NaN, adbr propagates the appropriate one.*/ \
    __ adbr(left_reg, right_reg); \
    __ b(&return_left, Label::kNear); \
    \
    __ bind(&return_right); \
    if (right_reg != result_reg) { \
      __ ldr(result_reg, right_reg); \
    } \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_left); \
    if (left_reg != result_reg) { \
      __ ldr(result_reg, left_reg); \
    } \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_FLOAT_MAX() \
  do { \
    DoubleRegister left_reg = i.InputDoubleRegister(0); \
    DoubleRegister right_reg = i.InputDoubleRegister(1); \
    DoubleRegister result_reg = i.OutputDoubleRegister(); \
    Label check_zero, return_left, return_right, return_nan, done; \
    __ cebr(left_reg, right_reg); \
    __ bunordered(&return_nan, Label::kNear); \
    __ beq(&check_zero); \
    __ bge(&return_left, Label::kNear); \
    __ b(&return_right, Label::kNear); \
    \
    __ bind(&check_zero); \
    __ lzdr(kDoubleRegZero); \
    __ cebr(left_reg, kDoubleRegZero); \
    /* left == right != 0. */ \
    __ bne(&return_left, Label::kNear); \
    /* At this point, both left and right are either 0 or -0. */ \
    /* N.B. The following works because +0 + -0 == +0 */ \
    /* For max we want logical-and of sign bit: (L + R) */ \
    __ ldr(result_reg, left_reg); \
    __ aebr(result_reg, right_reg); \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_nan); \
    /* If left or right are NaN, aebr propagates the appropriate one.*/ \
    __ aebr(left_reg, right_reg); \
    __ b(&return_left, Label::kNear); \
    \
    __ bind(&return_right); \
    if (right_reg != result_reg) { \
      __ ldr(result_reg, right_reg); \
    } \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_left); \
    if (left_reg != result_reg) { \
      __ ldr(result_reg, left_reg); \
    } \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_FLOAT_MIN() \
  do { \
    DoubleRegister left_reg = i.InputDoubleRegister(0); \
    DoubleRegister right_reg = i.InputDoubleRegister(1); \
    DoubleRegister result_reg = i.OutputDoubleRegister(); \
    Label check_zero, return_left, return_right, return_nan, done; \
    __ cebr(left_reg, right_reg); \
    __ bunordered(&return_nan, Label::kNear); \
    __ beq(&check_zero); \
    __ ble(&return_left, Label::kNear); \
    __ b(&return_right, Label::kNear); \
    \
    __ bind(&check_zero); \
    __ lzdr(kDoubleRegZero); \
    __ cebr(left_reg, kDoubleRegZero); \
    /* left == right != 0. */ \
    __ bne(&return_left, Label::kNear); \
    /* At this point, both left and right are either 0 or -0. */ \
    /* N.B. The following works because +0 + -0 == +0 */ \
    /* For min we want logical-or of sign bit: -(-L + -R) */ \
    __ lcebr(left_reg, left_reg); \
    __ ldr(result_reg, left_reg); \
    if (left_reg == right_reg) { \
      __ aebr(result_reg, right_reg); \
    } else { \
      __ sebr(result_reg, right_reg); \
    } \
    __ lcebr(result_reg, result_reg); \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_nan); \
    /* If left or right are NaN, aebr propagates the appropriate one.*/ \
    __ aebr(left_reg, right_reg); \
    __ b(&return_left, Label::kNear); \
    \
    __ bind(&return_right); \
    if (right_reg != result_reg) { \
      __ ldr(result_reg, right_reg); \
    } \
    __ b(&done, Label::kNear); \
    \
    __ bind(&return_left); \
    if (left_reg != result_reg) { \
      __ ldr(result_reg, left_reg); \
    } \
    __ bind(&done); \
  } while (0)

// Only the MRI addressing mode is available for these instructions.
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
  do { \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
  do { \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
  } while (0)

#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm) \
  { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
    if (mode != kMode_None) { \
      size_t first_index = 0; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ asm_instr_rm(dst, operand); \
    } else if (HasRegisterInput(instr, 0)) { \
      __ asm_instr_rr(dst, i.InputRegister(0)); \
    } else { \
      DCHECK(HasStackSlotInput(instr, 0)); \
      __ asm_instr_rm(dst, i.InputStackSlot(0)); \
    } \
  }

#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm) \
  { \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
    if (mode != kMode_None) { \
      size_t first_index = 0; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ asm_instr_rm(dst, operand); \
    } else if (HasRegisterInput(instr, 0)) { \
      __ asm_instr_rr(dst, i.InputRegister(0)); \
    } else { \
      DCHECK(HasStackSlotInput(instr, 0)); \
      __ asm_instr_rm(dst, i.InputStackSlot32(0)); \
    } \
  }

#define ASSEMBLE_STORE_FLOAT32() \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand); \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE() \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand); \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr) \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    __ asm_instr(value, operand); \
  } while (0)

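// Emulates a byte/halfword compare-exchange on top of the word-sized
// compare-and-swap: load the containing aligned word, splice the expected and
// new values into it with rotate-insert-select-bits, compare-and-swap the
// word, and finally extract the old value into the output register.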
#define ATOMIC_COMP_EXCHANGE(start, end, shift_amount, offset) \
  { \
    __ LoadlW(temp0, MemOperand(addr, offset)); \
    __ llgfr(temp1, temp0); \
    __ RotateInsertSelectBits(temp0, old_val, Operand(start), Operand(end), \
                              Operand(shift_amount), false); \
    __ RotateInsertSelectBits(temp1, new_val, Operand(start), Operand(end), \
                              Operand(shift_amount), false); \
    __ CmpAndSwap(temp0, temp1, MemOperand(addr, offset)); \
    __ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount), \
                              Operand(end + shift_amount), \
                              Operand(64 - shift_amount), true); \
  }

#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
  { \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * idx; \
    constexpr int end = start + 7; \
    constexpr int shift_amount = (3 - idx) * 8; \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
  }
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
  { \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * idx; \
    constexpr int end = start + 15; \
    constexpr int shift_amount = (1 - idx) * 16; \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
  }
#else
#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
  { \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * (3 - idx); \
    constexpr int end = start + 7; \
    constexpr int shift_amount = idx * 8; \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
  }
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
  { \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * (1 - idx); \
    constexpr int end = start + 15; \
    constexpr int shift_amount = idx * 16; \
    ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
  }
#endif

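// Word-aligned compare-exchange for bytes and halfwords: the low bits of the
// address select which of the ATOMIC_COMP_EXCHANGE_* expansions to use, and
// the result is sign- or zero-extended via load_and_ext.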
956 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
957   do {                                                      \
958     Register old_val = i.InputRegister(0);                  \
959     Register new_val = i.InputRegister(1);                  \
960     Register output = i.OutputRegister();                   \
961     Register addr = kScratchReg;                            \
962     Register temp0 = r0;                                    \
963     Register temp1 = r1;                                    \
964     size_t index = 2;                                       \
965     AddressingMode mode = kMode_None;                       \
966     MemOperand op = i.MemoryOperand(&mode, &index);         \
967     Label three, two, one, done;                            \
968     __ lay(addr, op);                                       \
969     __ tmll(addr, Operand(3));                              \
970     __ b(Condition(1), &three);                             \
971     __ b(Condition(2), &two);                               \
972     __ b(Condition(4), &one);                               \
973     /* ending with 0b00 */                                  \
974     ATOMIC_COMP_EXCHANGE_BYTE(0);                           \
975     __ b(&done);                                            \
976     /* ending with 0b01 */                                  \
977     __ bind(&one);                                          \
978     ATOMIC_COMP_EXCHANGE_BYTE(1);                           \
979     __ b(&done);                                            \
980     /* ending with 0b10 */                                  \
981     __ bind(&two);                                          \
982     ATOMIC_COMP_EXCHANGE_BYTE(2);                           \
983     __ b(&done);                                            \
984     /* ending with 0b11 */                                  \
985     __ bind(&three);                                        \
986     ATOMIC_COMP_EXCHANGE_BYTE(3);                           \
987     __ bind(&done);                                         \
988     __ load_and_ext(output, output);                        \
989   } while (false)
990 
991 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
992   do {                                                          \
993     Register old_val = i.InputRegister(0);                      \
994     Register new_val = i.InputRegister(1);                      \
995     Register output = i.OutputRegister();                       \
996     Register addr = kScratchReg;                                \
997     Register temp0 = r0;                                        \
998     Register temp1 = r1;                                        \
999     size_t index = 2;                                           \
1000     AddressingMode mode = kMode_None;                           \
1001     MemOperand op = i.MemoryOperand(&mode, &index);             \
1002     Label two, done;                                            \
1003     __ lay(addr, op);                                           \
1004     __ tmll(addr, Operand(3));                                  \
1005     __ b(Condition(2), &two);                                   \
1006     ATOMIC_COMP_EXCHANGE_HALFWORD(0);                           \
1007     __ b(&done);                                                \
1008     __ bind(&two);                                              \
1009     ATOMIC_COMP_EXCHANGE_HALFWORD(1);                           \
1010     __ bind(&done);                                             \
1011     __ load_and_ext(output, output);                            \
1012   } while (false)
1013 
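// The word-sized variant needs no lane dispatch, since a 32-bit atomic
// operand is assumed to be naturally aligned: it performs a single
// CmpAndSwap on the computed address and zero-extends the old value with
// LoadlW. A caller in the opcode switch below expands it roughly as
// (illustrative sketch, not literal code from this file):
//   case kWord32AtomicCompareExchangeWord32:
//     ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
//     break;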
1014 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD()       \
1015   do {                                                \
1016     Register new_val = i.InputRegister(1);            \
1017     Register output = i.OutputRegister();             \
1018     Register addr = kScratchReg;                      \
1019     size_t index = 2;                                 \
1020     AddressingMode mode = kMode_None;                 \
1021     MemOperand op = i.MemoryOperand(&mode, &index);   \
1022     __ lay(addr, op);                                 \
1023     __ CmpAndSwap(output, new_val, MemOperand(addr)); \
1024     __ LoadlW(output, output);                        \
1025   } while (false)
1026 
1027 #define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op)      \
1028   do {                                               \
1029     Register value = i.InputRegister(2);             \
1030     Register result = i.OutputRegister(0);           \
1031     Register addr = r1;                              \
1032     AddressingMode mode = kMode_None;                \
1033     MemOperand op = i.MemoryOperand(&mode);          \
1034     __ lay(addr, op);                                \
1035     __ load_and_op(result, value, MemOperand(addr)); \
1036     __ LoadlW(result, result);                       \
1037   } while (false)
1038 
1039 #define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op)    \
1040   do {                                               \
1041     Register value = i.InputRegister(2);             \
1042     Register result = i.OutputRegister(0);           \
1043     Register addr = r1;                              \
1044     AddressingMode mode = kMode_None;                \
1045     MemOperand op = i.MemoryOperand(&mode);          \
1046     __ lay(addr, op);                                \
1047     __ load_and_op(result, value, MemOperand(addr)); \
1048   } while (false)
1049 
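// ATOMIC_BIN_OP implements a sub-word atomic read-modify-write as a
// compare-and-swap retry loop on the enclosing 4-byte-aligned word: load the
// current word, rotate the operand value into the target bit range, apply
// bin_inst, splice the result back into a copy of the old word with
// RotateInsertSelectBits, then CmpAndSwap and retry until the word has not
// changed underneath us.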
1050 #define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end)           \
1051   do {                                                                      \
1052     Label do_cs;                                                            \
1053     __ LoadlW(prev, MemOperand(addr, offset));                              \
1054     __ bind(&do_cs);                                                        \
1055     __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end),    \
1056                               Operand(static_cast<intptr_t>(shift_amount)), \
1057                               true);                                        \
1058     __ bin_inst(new_val, prev, temp);                                       \
1059     __ lr(temp, prev);                                                      \
1060     __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end),  \
1061                               Operand::Zero(), false);                      \
1062     __ CmpAndSwap(prev, temp, MemOperand(addr, offset));                    \
1063     __ bne(&do_cs, Label::kNear);                                           \
1064   } while (false)
1065 
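// The big- and little-endian variants below differ only in how a lane index
// is mapped to a shift amount (and hence a bit range) inside the enclosing
// word, so the same index selects the correct halfword or byte on either
// byte order.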
1066 #ifdef V8_TARGET_BIG_ENDIAN
1067 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
1068   {                                                             \
1069     constexpr int offset = -(2 * index);                        \
1070     constexpr int shift_amount = 16 - (index * 16);             \
1071     constexpr int start = 48 - shift_amount;                    \
1072     constexpr int end = start + 15;                             \
1073     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);  \
1074     extract_result();                                           \
1075   }
1076 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)    \
1077   {                                                            \
1078     constexpr int offset = -(index);                           \
1079     constexpr int shift_amount = 24 - (index * 8);             \
1080     constexpr int start = 56 - shift_amount;                   \
1081     constexpr int end = start + 7;                             \
1082     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1083     extract_result();                                          \
1084   }
1085 #else
1086 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
1087   {                                                             \
1088     constexpr int offset = -(2 * index);                        \
1089     constexpr int shift_amount = index * 16;                    \
1090     constexpr int start = 48 - shift_amount;                    \
1091     constexpr int end = start + 15;                             \
1092     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end);  \
1093     extract_result();                                           \
1094   }
1095 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)    \
1096   {                                                            \
1097     constexpr int offset = -(index);                           \
1098     constexpr int shift_amount = index * 8;                    \
1099     constexpr int start = 56 - shift_amount;                   \
1100     constexpr int end = start + 7;                             \
1101     ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1102     extract_result();                                          \
1103   }
1104 #endif  // V8_TARGET_BIG_ENDIAN
1105 
1106 #define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
1107   do {                                                           \
1108     Register value = i.InputRegister(2);                         \
1109     Register result = i.OutputRegister(0);                       \
1110     Register prev = i.TempRegister(0);                           \
1111     Register new_val = r0;                                       \
1112     Register addr = r1;                                          \
1113     Register temp = kScratchReg;                                 \
1114     AddressingMode mode = kMode_None;                            \
1115     MemOperand op = i.MemoryOperand(&mode);                      \
1116     Label two, done;                                             \
1117     __ lay(addr, op);                                            \
1118     __ tmll(addr, Operand(3));                                   \
1119     __ b(Condition(2), &two);                                    \
1120     /* word boundary */                                          \
1121     ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result);         \
1122     __ b(&done);                                                 \
1123     __ bind(&two);                                               \
1124     /* halfword boundary */                                      \
1125     ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result);         \
1126     __ bind(&done);                                              \
1127   } while (false)
1128 
1129 #define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
1130   do {                                                       \
1131     Register value = i.InputRegister(2);                     \
1132     Register result = i.OutputRegister(0);                   \
1133     Register addr = i.TempRegister(0);                       \
1134     Register prev = r0;                                      \
1135     Register new_val = r1;                                   \
1136     Register temp = kScratchReg;                             \
1137     AddressingMode mode = kMode_None;                        \
1138     MemOperand op = i.MemoryOperand(&mode);                  \
1139     Label done, one, two, three;                             \
1140     __ lay(addr, op);                                        \
1141     __ tmll(addr, Operand(3));                               \
1142     __ b(Condition(1), &three);                              \
1143     __ b(Condition(2), &two);                                \
1144     __ b(Condition(4), &one);                                \
1145     /* ending with 0b00 (word boundary) */                   \
1146     ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result);         \
1147     __ b(&done);                                             \
1148     /* ending with 0b01 */                                   \
1149     __ bind(&one);                                           \
1150     ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result);         \
1151     __ b(&done);                                             \
1152     /* ending with 0b10 (halfword boundary) */               \
1153     __ bind(&two);                                           \
1154     ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result);         \
1155     __ b(&done);                                             \
1156     /* ending with 0b11 */                                   \
1157     __ bind(&three);                                         \
1158     ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result);         \
1159     __ bind(&done);                                          \
1160   } while (false)
1161 
1162 #define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64()        \
1163   do {                                                  \
1164     Register new_val = i.InputRegister(1);              \
1165     Register output = i.OutputRegister();               \
1166     Register addr = kScratchReg;                        \
1167     size_t index = 2;                                   \
1168     AddressingMode mode = kMode_None;                   \
1169     MemOperand op = i.MemoryOperand(&mode, &index);     \
1170     __ lay(addr, op);                                   \
1171     __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
1172   } while (false)
1173 
1174 void CodeGenerator::AssembleDeconstructFrame() {
1175   __ LeaveFrame(StackFrame::MANUAL);
1176   unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
1177 }
1178 
1179 void CodeGenerator::AssemblePrepareTailCall() {
1180   if (frame_access_state()->has_frame()) {
1181     __ RestoreFrameStateForTailCall();
1182   }
1183   frame_access_state()->SetFrameAccessToSP();
1184 }
1185 
1186 void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
1187                                                      Register scratch1,
1188                                                      Register scratch2,
1189                                                      Register scratch3) {
1190   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
1191   Label done;
1192 
1193   // Check if current frame is an arguments adaptor frame.
1194   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
1195   __ CmpP(scratch1,
1196           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1197   __ bne(&done);
1198 
1199   // Load arguments count from current arguments adaptor frame (note that it
1200   // does not include receiver).
1201   Register caller_args_count_reg = scratch1;
1202   __ LoadP(caller_args_count_reg,
1203            MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1204   __ SmiUntag(caller_args_count_reg);
1205 
1206   __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
1207   __ bind(&done);
1208 }
1209 
1210 namespace {
1211 
1212 void FlushPendingPushRegisters(TurboAssembler* tasm,
1213                                FrameAccessState* frame_access_state,
1214                                ZoneVector<Register>* pending_pushes) {
1215   switch (pending_pushes->size()) {
1216     case 0:
1217       break;
1218     case 1:
1219       tasm->Push((*pending_pushes)[0]);
1220       break;
1221     case 2:
1222       tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
1223       break;
1224     case 3:
1225       tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
1226                  (*pending_pushes)[2]);
1227       break;
1228     default:
1229       UNREACHABLE();
1230   }
1231   frame_access_state->IncreaseSPDelta(pending_pushes->size());
1232   pending_pushes->clear();
1233 }
1234 
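// AdjustStackPointerForTailCall moves sp so that the number of slots above
// it matches new_slot_above_sp, flushing any registers queued in
// pending_pushes first (their pushes would otherwise be emitted at a stale
// sp) and recording the delta in the FrameAccessState. The stack is only
// shrunk when allow_shrinkage is set.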
1235 void AdjustStackPointerForTailCall(
1236     TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
1237     ZoneVector<Register>* pending_pushes = nullptr,
1238     bool allow_shrinkage = true) {
1239   int current_sp_offset = state->GetSPToFPSlotCount() +
1240                           StandardFrameConstants::kFixedSlotCountAboveFp;
1241   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1242   if (stack_slot_delta > 0) {
1243     if (pending_pushes != nullptr) {
1244       FlushPendingPushRegisters(tasm, state, pending_pushes);
1245     }
1246     tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1247     state->IncreaseSPDelta(stack_slot_delta);
1248   } else if (allow_shrinkage && stack_slot_delta < 0) {
1249     if (pending_pushes != nullptr) {
1250       FlushPendingPushRegisters(tasm, state, pending_pushes);
1251     }
1252     tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1253     state->IncreaseSPDelta(stack_slot_delta);
1254   }
1255 }
1256 
1257 void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
1258                                    S390OperandConverter const& i) {
1259   const MemoryAccessMode access_mode =
1260       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
1261   if (access_mode == kMemoryAccessPoisoned) {
1262     Register value = i.OutputRegister();
1263     codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
1264   }
1265 }
1266 
1267 }  // namespace
1268 
1269 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
1270                                               int first_unused_stack_slot) {
1271   ZoneVector<MoveOperands*> pushes(zone());
1272   GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
1273 
1274   if (!pushes.empty() &&
1275       (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
1276        first_unused_stack_slot)) {
1277     S390OperandConverter g(this, instr);
1278     ZoneVector<Register> pending_pushes(zone());
1279     for (auto move : pushes) {
1280       LocationOperand destination_location(
1281           LocationOperand::cast(move->destination()));
1282       InstructionOperand source(move->source());
1283       AdjustStackPointerForTailCall(
1284           tasm(), frame_access_state(),
1285           destination_location.index() - pending_pushes.size(),
1286           &pending_pushes);
1287       // Pushes of non-register data types are not supported.
1288       DCHECK(source.IsRegister());
1289       LocationOperand source_location(LocationOperand::cast(source));
1290       pending_pushes.push_back(source_location.GetRegister());
1291       // TODO(arm): We can push more than 3 registers at once. Add support in
1292       // the macro-assembler for pushing a list of registers.
1293       if (pending_pushes.size() == 3) {
1294         FlushPendingPushRegisters(tasm(), frame_access_state(),
1295                                   &pending_pushes);
1296       }
1297       move->Eliminate();
1298     }
1299     FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
1300   }
1301   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1302                                 first_unused_stack_slot, nullptr, false);
1303 }
1304 
1305 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
1306                                              int first_unused_stack_slot) {
1307   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1308                                 first_unused_stack_slot);
1309 }
1310 
1311 // Check that {kJavaScriptCallCodeStartRegister} is correct.
1312 void CodeGenerator::AssembleCodeStartRegisterCheck() {
1313   Register scratch = r1;
1314   __ ComputeCodeStartAddress(scratch);
1315   __ CmpP(scratch, kJavaScriptCallCodeStartRegister);
1316   __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1317 }
1318 
1319 // Check if the code object is marked for deoptimization. If it is, then it
1320 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
1321 // to:
1322 //    1. read from memory the word that contains that bit, which can be found in
1323 //       the flags in the referenced {CodeDataContainer} object;
1324 //    2. test kMarkedForDeoptimizationBit in those flags; and
1325 //    3. if it is not zero then it jumps to the builtin.
1326 void CodeGenerator::BailoutIfDeoptimized() {
1327   if (FLAG_debug_code) {
1328     // Check that {kJavaScriptCallCodeStartRegister} is correct.
1329     __ ComputeCodeStartAddress(ip);
1330     __ CmpP(ip, kJavaScriptCallCodeStartRegister);
1331     __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1332   }
1333 
1334   int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
1335   __ LoadTaggedPointerField(
1336       ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
1337   __ LoadW(ip,
1338            FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
1339   __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
1340   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
1341           RelocInfo::CODE_TARGET, ne);
1342 }
1343 
1344 void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
1345   Register scratch = r1;
1346 
1347   __ ComputeCodeStartAddress(scratch);
1348 
1349   // Calculate a mask which has all bits set in the normal case, but has all
1350   // bits cleared if we are speculatively executing the wrong PC.
1351   __ LoadImmP(kSpeculationPoisonRegister, Operand::Zero());
1352   __ LoadImmP(r0, Operand(-1));
1353   __ CmpP(kJavaScriptCallCodeStartRegister, scratch);
1354   __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
1355 }
1356 
1357 void CodeGenerator::AssembleRegisterArgumentPoisoning() {
1358   __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
1359   __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
1360   __ AndP(sp, sp, kSpeculationPoisonRegister);
1361 }
1362 
1363 // Assembles an instruction after register allocation, producing machine code.
1364 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1365     Instruction* instr) {
1366   S390OperandConverter i(this, instr);
1367   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1368 
1369   switch (opcode) {
1370     case kArchComment:
1371 #ifdef V8_TARGET_ARCH_S390X
1372       __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
1373 #else
1374       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
1375 #endif
1376       break;
1377     case kArchCallCodeObject: {
1378       if (HasRegisterInput(instr, 0)) {
1379         Register reg = i.InputRegister(0);
1380         DCHECK_IMPLIES(
1381             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1382             reg == kJavaScriptCallCodeStartRegister);
1383         __ CallCodeObject(reg);
1384       } else {
1385         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
1386       }
1387       RecordCallPosition(instr);
1388       frame_access_state()->ClearSPDelta();
1389       break;
1390     }
1391     case kArchCallBuiltinPointer: {
1392       DCHECK(!instr->InputAt(0)->IsImmediate());
1393       Register builtin_index = i.InputRegister(0);
1394       __ CallBuiltinByIndex(builtin_index);
1395       RecordCallPosition(instr);
1396       frame_access_state()->ClearSPDelta();
1397       break;
1398     }
1399     case kArchCallWasmFunction: {
1400       // We must not share code targets for calls to builtins for wasm code, as
1401       // they might need to be patched individually.
1402       if (instr->InputAt(0)->IsImmediate()) {
1403         Constant constant = i.ToConstant(instr->InputAt(0));
1404 #ifdef V8_TARGET_ARCH_S390X
1405         Address wasm_code = static_cast<Address>(constant.ToInt64());
1406 #else
1407         Address wasm_code = static_cast<Address>(constant.ToInt32());
1408 #endif
1409         __ Call(wasm_code, constant.rmode());
1410       } else {
1411         __ Call(i.InputRegister(0));
1412       }
1413       RecordCallPosition(instr);
1414       frame_access_state()->ClearSPDelta();
1415       break;
1416     }
1417     case kArchTailCallCodeObjectFromJSFunction:
1418     case kArchTailCallCodeObject: {
1419       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
1420         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1421                                          i.TempRegister(0), i.TempRegister(1),
1422                                          i.TempRegister(2));
1423       }
1424       if (HasRegisterInput(instr, 0)) {
1425         Register reg = i.InputRegister(0);
1426         DCHECK_IMPLIES(
1427             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1428             reg == kJavaScriptCallCodeStartRegister);
1429         __ JumpCodeObject(reg);
1430       } else {
1431         // We cannot use the constant pool to load the target since
1432         // we've already restored the caller's frame.
1433         ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
1434         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
1435       }
1436       frame_access_state()->ClearSPDelta();
1437       frame_access_state()->SetFrameAccessToDefault();
1438       break;
1439     }
1440     case kArchTailCallWasm: {
1441       // We must not share code targets for calls to builtins for wasm code, as
1442       // they might need to be patched individually.
1443       if (instr->InputAt(0)->IsImmediate()) {
1444         Constant constant = i.ToConstant(instr->InputAt(0));
1445 #ifdef V8_TARGET_ARCH_S390X
1446         Address wasm_code = static_cast<Address>(constant.ToInt64());
1447 #else
1448         Address wasm_code = static_cast<Address>(constant.ToInt32());
1449 #endif
1450         __ Jump(wasm_code, constant.rmode());
1451       } else {
1452         __ Jump(i.InputRegister(0));
1453       }
1454       frame_access_state()->ClearSPDelta();
1455       frame_access_state()->SetFrameAccessToDefault();
1456       break;
1457     }
1458     case kArchTailCallAddress: {
1459       CHECK(!instr->InputAt(0)->IsImmediate());
1460       Register reg = i.InputRegister(0);
1461       DCHECK_IMPLIES(
1462           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1463           reg == kJavaScriptCallCodeStartRegister);
1464       __ Jump(reg);
1465       frame_access_state()->ClearSPDelta();
1466       frame_access_state()->SetFrameAccessToDefault();
1467       break;
1468     }
1469     case kArchCallJSFunction: {
1470       Register func = i.InputRegister(0);
1471       if (FLAG_debug_code) {
1472         // Check the function's context matches the context argument.
1473         __ LoadTaggedPointerField(
1474             kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
1475         __ CmpP(cp, kScratchReg);
1476         __ Assert(eq, AbortReason::kWrongFunctionContext);
1477       }
1478       static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1479       __ LoadTaggedPointerField(r4,
1480                                 FieldMemOperand(func, JSFunction::kCodeOffset));
1481       __ CallCodeObject(r4);
1482       RecordCallPosition(instr);
1483       frame_access_state()->ClearSPDelta();
1484       break;
1485     }
1486     case kArchPrepareCallCFunction: {
1487       int const num_parameters = MiscField::decode(instr->opcode());
1488       __ PrepareCallCFunction(num_parameters, kScratchReg);
1489       // Frame alignment requires using FP-relative frame addressing.
1490       frame_access_state()->SetFrameAccessToFP();
1491       break;
1492     }
1493     case kArchSaveCallerRegisters: {
1494       fp_mode_ =
1495           static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1496       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1497       // kReturnRegister0 should have been saved before entering the stub.
1498       int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
1499       DCHECK(IsAligned(bytes, kSystemPointerSize));
1500       DCHECK_EQ(0, frame_access_state()->sp_delta());
1501       frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1502       DCHECK(!caller_registers_saved_);
1503       caller_registers_saved_ = true;
1504       break;
1505     }
1506     case kArchRestoreCallerRegisters: {
1507       DCHECK(fp_mode_ ==
1508              static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1509       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1510       // Don't overwrite the returned value.
1511       int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
1512       frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
1513       DCHECK_EQ(0, frame_access_state()->sp_delta());
1514       DCHECK(caller_registers_saved_);
1515       caller_registers_saved_ = false;
1516       break;
1517     }
1518     case kArchPrepareTailCall:
1519       AssemblePrepareTailCall();
1520       break;
1521     case kArchCallCFunction: {
1522       int const num_parameters = MiscField::decode(instr->opcode());
1523       Label return_location;
1524       // Put the return address in a stack slot.
1525       if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
1526         // Put the return address in a stack slot.
1527         __ larl(r0, &return_location);
1528         __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
1529       }
1530       if (instr->InputAt(0)->IsImmediate()) {
1531         ExternalReference ref = i.InputExternalReference(0);
1532         __ CallCFunction(ref, num_parameters);
1533       } else {
1534         Register func = i.InputRegister(0);
1535         __ CallCFunction(func, num_parameters);
1536       }
1537       __ bind(&return_location);
1538       if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
1539         RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
1540       }
1541       frame_access_state()->SetFrameAccessToDefault();
1542       // Ideally, we should decrement SP delta to match the change of stack
1543       // pointer in CallCFunction. However, for certain architectures (e.g.
1544       // ARM), there may be more strict alignment requirement, causing old SP
1545       // to be saved on the stack. In those cases, we can not calculate the SP
1546       // delta statically.
1547       frame_access_state()->ClearSPDelta();
1548       if (caller_registers_saved_) {
1549         // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1550         // Here, we assume the sequence to be:
1551         //   kArchSaveCallerRegisters;
1552         //   kArchCallCFunction;
1553         //   kArchRestoreCallerRegisters;
1554         int bytes =
1555             __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1556         frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1557       }
1558       break;
1559     }
1560     case kArchJmp:
1561       AssembleArchJump(i.InputRpo(0));
1562       break;
1563     case kArchBinarySearchSwitch:
1564       AssembleArchBinarySearchSwitch(instr);
1565       break;
1566     case kArchTableSwitch:
1567       AssembleArchTableSwitch(instr);
1568       break;
1569     case kArchAbortCSAAssert:
1570       DCHECK(i.InputRegister(0) == r3);
1571       {
1572         // We don't actually want to generate a pile of code for this, so just
1573         // claim there is a stack frame, without generating one.
1574         FrameScope scope(tasm(), StackFrame::NONE);
1575         __ Call(
1576             isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
1577             RelocInfo::CODE_TARGET);
1578       }
1579       __ stop();
1580       break;
1581     case kArchDebugBreak:
1582       __ DebugBreak();
1583       break;
1584     case kArchNop:
1585     case kArchThrowTerminator:
1586       // don't emit code for nops.
1587       break;
1588     case kArchDeoptimize: {
1589       DeoptimizationExit* exit =
1590           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
1591       __ b(exit->label());
1592       break;
1593     }
1594     case kArchRet:
1595       AssembleReturn(instr->InputAt(0));
1596       break;
1597     case kArchFramePointer:
1598       __ LoadRR(i.OutputRegister(), fp);
1599       break;
1600     case kArchParentFramePointer:
1601       if (frame_access_state()->has_frame()) {
1602         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
1603       } else {
1604         __ LoadRR(i.OutputRegister(), fp);
1605       }
1606       break;
1607     case kArchStackPointerGreaterThan: {
1608       // Potentially apply an offset to the current stack pointer before the
1609       // comparison to consider the size difference of an optimized frame versus
1610       // the contained unoptimized frames.
1611 
1612       Register lhs_register = sp;
1613       uint32_t offset;
1614 
1615       if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
1616         lhs_register = i.TempRegister(0);
1617         __ SubP(lhs_register, sp, Operand(offset));
1618       }
1619 
1620       constexpr size_t kValueIndex = 0;
1621       DCHECK(instr->InputAt(kValueIndex)->IsRegister());
1622       __ CmpLogicalP(lhs_register, i.InputRegister(kValueIndex));
1623       break;
1624     }
1625     case kArchStackCheckOffset:
1626       __ LoadSmiLiteral(i.OutputRegister(),
1627                         Smi::FromInt(GetStackCheckOffset()));
1628       break;
1629     case kArchTruncateDoubleToI:
1630       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1631                            i.InputDoubleRegister(0), DetermineStubCallMode());
1632       break;
1633     case kArchStoreWithWriteBarrier: {
1634       RecordWriteMode mode =
1635           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1636       Register object = i.InputRegister(0);
1637       Register value = i.InputRegister(2);
1638       Register scratch0 = i.TempRegister(0);
1639       Register scratch1 = i.TempRegister(1);
1640       OutOfLineRecordWrite* ool;
1641 
1642       AddressingMode addressing_mode =
1643           AddressingModeField::decode(instr->opcode());
1644       if (addressing_mode == kMode_MRI) {
1645         int32_t offset = i.InputInt32(1);
1646         ool = zone()->New<OutOfLineRecordWrite>(
1647             this, object, offset, value, scratch0, scratch1, mode,
1648             DetermineStubCallMode(), &unwinding_info_writer_);
1649         __ StoreTaggedField(value, MemOperand(object, offset), r0);
1650       } else {
1651         DCHECK_EQ(kMode_MRR, addressing_mode);
1652         Register offset(i.InputRegister(1));
1653         ool = zone()->New<OutOfLineRecordWrite>(
1654             this, object, offset, value, scratch0, scratch1, mode,
1655             DetermineStubCallMode(), &unwinding_info_writer_);
1656         __ StoreTaggedField(value, MemOperand(object, offset));
1657       }
1658       __ CheckPageFlag(object, scratch0,
1659                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1660                        ool->entry());
1661       __ bind(ool->exit());
1662       break;
1663     }
1664     case kArchStackSlot: {
1665       FrameOffset offset =
1666           frame_access_state()->GetFrameOffset(i.InputInt32(0));
1667       __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1668               Operand(offset.offset()));
1669       break;
1670     }
1671     case kArchWordPoisonOnSpeculation:
1672       DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
1673       __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
1674       break;
1675     case kS390_Peek: {
1676       int reverse_slot = i.InputInt32(0);
1677       int offset =
1678           FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1679       if (instr->OutputAt(0)->IsFPRegister()) {
1680         LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1681         if (op->representation() == MachineRepresentation::kFloat64) {
1682           __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset));
1683         } else if (op->representation() == MachineRepresentation::kFloat32) {
1684           __ LoadFloat32(i.OutputFloatRegister(), MemOperand(fp, offset));
1685         } else {
1686           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1687           __ LoadSimd128(i.OutputSimd128Register(), MemOperand(fp, offset),
1688                          kScratchReg);
1689         }
1690       } else {
1691         __ LoadP(i.OutputRegister(), MemOperand(fp, offset));
1692       }
1693       break;
1694     }
1695     case kS390_Abs32:
1696       // TODO(john.yan): zero-ext
1697       __ lpr(i.OutputRegister(0), i.InputRegister(0));
1698       break;
1699     case kS390_Abs64:
1700       __ lpgr(i.OutputRegister(0), i.InputRegister(0));
1701       break;
1702     case kS390_And32:
1703       // zero-ext
1704       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1705         ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
1706       } else {
1707         ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
1708       }
1709       break;
1710     case kS390_And64:
1711       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1712         ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
1713       } else {
1714         ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
1715       }
1716       break;
1717     case kS390_Or32:
1718       // zero-ext
1719       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1720         ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
1721       } else {
1722         ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
1723       }
1724       break;
1725     case kS390_Or64:
1726       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1727         ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
1728       } else {
1729         ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
1730       }
1731       break;
1732     case kS390_Xor32:
1733       // zero-ext
1734       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1735         ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
1736       } else {
1737         ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
1738       }
1739       break;
1740     case kS390_Xor64:
1741       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1742         ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
1743       } else {
1744         ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
1745       }
1746       break;
1747     case kS390_ShiftLeft32:
1748       // zero-ext
1749       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1750         ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
1751       } else {
1752         ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
1753       }
1754       break;
1755     case kS390_ShiftLeft64:
1756       ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
1757       break;
1758     case kS390_ShiftRight32:
1759       // zero-ext
1760       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1761         ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
1762       } else {
1763         ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
1764       }
1765       break;
1766     case kS390_ShiftRight64:
1767       ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
1768       break;
1769     case kS390_ShiftRightArith32:
1770       // zero-ext
1771       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1772         ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
1773       } else {
1774         ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
1775       }
1776       break;
1777     case kS390_ShiftRightArith64:
1778       ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
1779       break;
1780 #if !V8_TARGET_ARCH_S390X
1781     case kS390_AddPair:
1782       // i.InputRegister(0) ... left low word.
1783       // i.InputRegister(1) ... left high word.
1784       // i.InputRegister(2) ... right low word.
1785       // i.InputRegister(3) ... right high word.
1786       __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
1787                       i.InputRegister(2));
1788       __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
1789                                i.InputRegister(3));
1790       break;
1791     case kS390_SubPair:
1792       // i.InputRegister(0) ... left low word.
1793       // i.InputRegister(1) ... left high word.
1794       // i.InputRegister(2) ... right low word.
1795       // i.InputRegister(3) ... right high word.
1796       __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
1797                       i.InputRegister(2));
1798       __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
1799                                 i.InputRegister(3));
1800       break;
1801     case kS390_MulPair:
1802       // i.InputRegister(0) ... left low word.
1803       // i.InputRegister(1) ... left high word.
1804       // i.InputRegister(2) ... right low word.
1805       // i.InputRegister(3) ... right high word.
1806       __ sllg(r0, i.InputRegister(1), Operand(32));
1807       __ sllg(r1, i.InputRegister(3), Operand(32));
1808       __ lr(r0, i.InputRegister(0));
1809       __ lr(r1, i.InputRegister(2));
1810       __ msgr(r1, r0);
1811       __ lr(i.OutputRegister(0), r1);
1812       __ srag(i.OutputRegister(1), r1, Operand(32));
1813       break;
1814     case kS390_ShiftLeftPair: {
1815       Register second_output =
1816           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1817       if (instr->InputAt(2)->IsImmediate()) {
1818         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1819                          i.InputRegister(1), i.InputInt32(2));
1820       } else {
1821         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1822                          i.InputRegister(1), kScratchReg, i.InputRegister(2));
1823       }
1824       break;
1825     }
1826     case kS390_ShiftRightPair: {
1827       Register second_output =
1828           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1829       if (instr->InputAt(2)->IsImmediate()) {
1830         __ ShiftRightPair(i.OutputRegister(0), second_output,
1831                           i.InputRegister(0), i.InputRegister(1),
1832                           i.InputInt32(2));
1833       } else {
1834         __ ShiftRightPair(i.OutputRegister(0), second_output,
1835                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
1836                           i.InputRegister(2));
1837       }
1838       break;
1839     }
1840     case kS390_ShiftRightArithPair: {
1841       Register second_output =
1842           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1843       if (instr->InputAt(2)->IsImmediate()) {
1844         __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1845                                i.InputRegister(0), i.InputRegister(1),
1846                                i.InputInt32(2));
1847       } else {
1848         __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1849                                i.InputRegister(0), i.InputRegister(1),
1850                                kScratchReg, i.InputRegister(2));
1851       }
1852       break;
1853     }
1854 #endif
1855     case kS390_RotRight32: {
1856       // zero-ext
1857       if (HasRegisterInput(instr, 1)) {
1858         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
1859         __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1860       } else {
1861         __ rll(i.OutputRegister(), i.InputRegister(0),
1862                Operand(32 - i.InputInt32(1)));
1863       }
1864       CHECK_AND_ZERO_EXT_OUTPUT(2);
1865       break;
1866     }
1867     case kS390_RotRight64:
1868       if (HasRegisterInput(instr, 1)) {
1869         __ lcgr(kScratchReg, i.InputRegister(1));
1870         __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1871       } else {
1872         DCHECK(HasImmediateInput(instr, 1));
1873         __ rllg(i.OutputRegister(), i.InputRegister(0),
1874                 Operand(64 - i.InputInt32(1)));
1875       }
1876       break;
1877     // TODO(john.yan): clean up kS390_RotLeftAnd...
1878     case kS390_RotLeftAndClear64:
1879       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1880         int shiftAmount = i.InputInt32(1);
1881         int endBit = 63 - shiftAmount;
1882         int startBit = 63 - i.InputInt32(2);
1883         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1884                                   Operand(startBit), Operand(endBit),
1885                                   Operand(shiftAmount), true);
1886       } else {
1887         int shiftAmount = i.InputInt32(1);
1888         int clearBit = 63 - i.InputInt32(2);
1889         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1890         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1891         __ srlg(i.OutputRegister(), i.OutputRegister(),
1892                 Operand(clearBit + shiftAmount));
1893         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
1894       }
1895       break;
1896     case kS390_RotLeftAndClearLeft64:
1897       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1898         int shiftAmount = i.InputInt32(1);
1899         int endBit = 63;
1900         int startBit = 63 - i.InputInt32(2);
1901         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1902                                   Operand(startBit), Operand(endBit),
1903                                   Operand(shiftAmount), true);
1904       } else {
1905         int shiftAmount = i.InputInt32(1);
1906         int clearBit = 63 - i.InputInt32(2);
1907         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1908         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1909         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1910       }
1911       break;
1912     case kS390_RotLeftAndClearRight64:
1913       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1914         int shiftAmount = i.InputInt32(1);
1915         int endBit = 63 - i.InputInt32(2);
1916         int startBit = 0;
1917         __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1918                                   Operand(startBit), Operand(endBit),
1919                                   Operand(shiftAmount), true);
1920       } else {
1921         int shiftAmount = i.InputInt32(1);
1922         int clearBit = i.InputInt32(2);
1923         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1924         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1925         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1926       }
1927       break;
1928     case kS390_Add32: {
1929       // zero-ext
1930       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1931         ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
1932       } else {
1933         ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
1934       }
1935       break;
1936     }
1937     case kS390_Add64:
1938       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1939         ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
1940       } else {
1941         ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
1942       }
1943       break;
1944     case kS390_AddFloat:
1945       ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
1946       break;
1947     case kS390_AddDouble:
1948       ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
1949       break;
1950     case kS390_Sub32:
1951       // zero-ext
1952       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1953         ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
1954       } else {
1955         ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
1956       }
1957       break;
1958     case kS390_Sub64:
1959       if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1960         ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
1961       } else {
1962         ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
1963       }
1964       break;
1965     case kS390_SubFloat:
1966       ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
1967       break;
1968     case kS390_SubDouble:
1969       ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
1970       break;
1971     case kS390_Mul32:
1972       // zero-ext
1973       if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1974         ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(Mul32));
1975       } else {
1976         ASSEMBLE_BIN32_OP(RRInstr(Mul32), RM32Instr(Mul32), RIInstr(Mul32));
1977       }
1978       break;
1979     case kS390_Mul32WithOverflow:
1980       // zero-ext
1981       ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
1982                         RRM32Instr(Mul32WithOverflowIfCCUnequal),
1983                         RRIInstr(Mul32WithOverflowIfCCUnequal));
1984       break;
1985     case kS390_Mul64:
1986       ASSEMBLE_BIN_OP(RRInstr(Mul64), RM64Instr(Mul64), RIInstr(Mul64));
1987       break;
1988     case kS390_MulHigh32:
1989       // zero-ext
1990       ASSEMBLE_BIN_OP(RRRInstr(MulHigh32), RRM32Instr(MulHigh32),
1991                       RRIInstr(MulHigh32));
1992       break;
1993     case kS390_MulHighU32:
1994       // zero-ext
1995       ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
1996                       RRIInstr(MulHighU32));
1997       break;
1998     case kS390_MulFloat:
1999       ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
2000       break;
2001     case kS390_MulDouble:
2002       ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
2003       break;
2004     case kS390_Div64:
2005       ASSEMBLE_BIN_OP(RRRInstr(Div64), RRM64Instr(Div64), nullInstr);
2006       break;
2007     case kS390_Div32: {
2008       // zero-ext
2009       ASSEMBLE_BIN_OP(RRRInstr(Div32), RRM32Instr(Div32), nullInstr);
2010       break;
2011     }
2012     case kS390_DivU64:
2013       ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
2014       break;
2015     case kS390_DivU32: {
2016       // zero-ext
2017       ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
2018       break;
2019     }
2020     case kS390_DivFloat:
2021       ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
2022       break;
2023     case kS390_DivDouble:
2024       ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
2025       break;
2026     case kS390_Mod32:
2027       // zero-ext
2028       ASSEMBLE_BIN_OP(RRRInstr(Mod32), RRM32Instr(Mod32), nullInstr);
2029       break;
2030     case kS390_ModU32:
2031       // zero-ext
2032       ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
2033       break;
2034     case kS390_Mod64:
2035       ASSEMBLE_BIN_OP(RRRInstr(Mod64), RRM64Instr(Mod64), nullInstr);
2036       break;
2037     case kS390_ModU64:
2038       ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
2039       break;
2040     case kS390_AbsFloat:
2041       __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2042       break;
2043     case kS390_SqrtFloat:
2044       ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
2045       break;
2046     case kS390_SqrtDouble:
2047       ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
2048       break;
2049     case kS390_FloorFloat:
2050       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
2051                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2052       break;
2053     case kS390_CeilFloat:
2054       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
2055                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2056       break;
2057     case kS390_TruncateFloat:
2058       __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
2059                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2060       break;
2061     //  Double operations
2062     case kS390_ModDouble:
2063       ASSEMBLE_FLOAT_MODULO();
2064       break;
2065     case kIeee754Float64Acos:
2066       ASSEMBLE_IEEE754_UNOP(acos);
2067       break;
2068     case kIeee754Float64Acosh:
2069       ASSEMBLE_IEEE754_UNOP(acosh);
2070       break;
2071     case kIeee754Float64Asin:
2072       ASSEMBLE_IEEE754_UNOP(asin);
2073       break;
2074     case kIeee754Float64Asinh:
2075       ASSEMBLE_IEEE754_UNOP(asinh);
2076       break;
2077     case kIeee754Float64Atanh:
2078       ASSEMBLE_IEEE754_UNOP(atanh);
2079       break;
2080     case kIeee754Float64Atan:
2081       ASSEMBLE_IEEE754_UNOP(atan);
2082       break;
2083     case kIeee754Float64Atan2:
2084       ASSEMBLE_IEEE754_BINOP(atan2);
2085       break;
2086     case kIeee754Float64Tan:
2087       ASSEMBLE_IEEE754_UNOP(tan);
2088       break;
2089     case kIeee754Float64Tanh:
2090       ASSEMBLE_IEEE754_UNOP(tanh);
2091       break;
2092     case kIeee754Float64Cbrt:
2093       ASSEMBLE_IEEE754_UNOP(cbrt);
2094       break;
2095     case kIeee754Float64Sin:
2096       ASSEMBLE_IEEE754_UNOP(sin);
2097       break;
2098     case kIeee754Float64Sinh:
2099       ASSEMBLE_IEEE754_UNOP(sinh);
2100       break;
2101     case kIeee754Float64Cos:
2102       ASSEMBLE_IEEE754_UNOP(cos);
2103       break;
2104     case kIeee754Float64Cosh:
2105       ASSEMBLE_IEEE754_UNOP(cosh);
2106       break;
2107     case kIeee754Float64Exp:
2108       ASSEMBLE_IEEE754_UNOP(exp);
2109       break;
2110     case kIeee754Float64Expm1:
2111       ASSEMBLE_IEEE754_UNOP(expm1);
2112       break;
2113     case kIeee754Float64Log:
2114       ASSEMBLE_IEEE754_UNOP(log);
2115       break;
2116     case kIeee754Float64Log1p:
2117       ASSEMBLE_IEEE754_UNOP(log1p);
2118       break;
2119     case kIeee754Float64Log2:
2120       ASSEMBLE_IEEE754_UNOP(log2);
2121       break;
2122     case kIeee754Float64Log10:
2123       ASSEMBLE_IEEE754_UNOP(log10);
2124       break;
2125     case kIeee754Float64Pow:
2126       ASSEMBLE_IEEE754_BINOP(pow);
2127       break;
2128     case kS390_Neg32:
2129       __ lcr(i.OutputRegister(), i.InputRegister(0));
2130       CHECK_AND_ZERO_EXT_OUTPUT(1);
2131       break;
2132     case kS390_Neg64:
2133       __ lcgr(i.OutputRegister(), i.InputRegister(0));
2134       break;
2135     case kS390_MaxFloat:
2136       ASSEMBLE_FLOAT_MAX();
2137       break;
2138     case kS390_MaxDouble:
2139       ASSEMBLE_DOUBLE_MAX();
2140       break;
2141     case kS390_MinFloat:
2142       ASSEMBLE_FLOAT_MIN();
2143       break;
2144     case kS390_MinDouble:
2145       ASSEMBLE_DOUBLE_MIN();
2146       break;
2147     case kS390_AbsDouble:
2148       __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2149       break;
2150     case kS390_FloorDouble:
2151       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
2152                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2153       break;
2154     case kS390_CeilDouble:
2155       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
2156                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2157       break;
2158     case kS390_TruncateDouble:
2159       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
2160                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2161       break;
2162     case kS390_RoundDouble:
2163       __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0,
2164                 i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2165       break;
2166     case kS390_NegFloat:
2167       ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
2168       break;
2169     case kS390_NegDouble:
2170       ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
2171       break;
2172     case kS390_Cntlz32: {
2173       __ llgfr(i.OutputRegister(), i.InputRegister(0));
2174       __ flogr(r0, i.OutputRegister());
2175       __ Add32(i.OutputRegister(), r0, Operand(-32));
2176       // No need to zero-extend because llgfr above already zero-extends.
2177       break;
2178     }
2179 #if V8_TARGET_ARCH_S390X
2180     case kS390_Cntlz64: {
2181       __ flogr(r0, i.InputRegister(0));
2182       __ LoadRR(i.OutputRegister(), r0);
2183       break;
2184     }
2185 #endif
2186     case kS390_Popcnt32:
2187       __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
2188       break;
2189 #if V8_TARGET_ARCH_S390X
2190     case kS390_Popcnt64:
2191       __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
2192       break;
2193 #endif
2194     case kS390_Cmp32:
2195       ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
2196       break;
2197 #if V8_TARGET_ARCH_S390X
2198     case kS390_Cmp64:
2199       ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
2200       break;
2201 #endif
2202     case kS390_CmpFloat:
2203       ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
2204       // __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
2205       break;
2206     case kS390_CmpDouble:
2207       ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
2208       // __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
2209       break;
2210     case kS390_Tst32:
2211       if (HasRegisterInput(instr, 1)) {
2212         __ And(r0, i.InputRegister(0), i.InputRegister(1));
2213       } else {
2214         // detect tmlh/tmhl/tmhh case
2215         Operand opnd = i.InputImmediate(1);
2216         if (is_uint16(opnd.immediate())) {
2217           __ tmll(i.InputRegister(0), opnd);
2218         } else {
2219           __ lr(r0, i.InputRegister(0));
2220           __ nilf(r0, opnd);
2221         }
2222       }
2223       break;
2224     case kS390_Tst64:
2225       if (HasRegisterInput(instr, 1)) {
2226         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
2227       } else {
2228         Operand opnd = i.InputImmediate(1);
2229         if (is_uint16(opnd.immediate())) {
2230           __ tmll(i.InputRegister(0), opnd);
2231         } else {
2232           __ AndP(r0, i.InputRegister(0), opnd);
2233         }
2234       }
2235       break;
2236     case kS390_Float64SilenceNaN: {
2237       DoubleRegister value = i.InputDoubleRegister(0);
2238       DoubleRegister result = i.OutputDoubleRegister();
2239       __ CanonicalizeNaN(result, value);
2240       break;
2241     }
2242     case kS390_StackClaim: {
2243       int num_slots = i.InputInt32(0);
2244       __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
2245       frame_access_state()->IncreaseSPDelta(num_slots);
2246       break;
2247     }
2248     case kS390_Push:
2249       if (instr->InputAt(0)->IsFPRegister()) {
2250         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2251         switch (op->representation()) {
2252           case MachineRepresentation::kFloat32:
2253             __ lay(sp, MemOperand(sp, -kSystemPointerSize));
2254             __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
                 frame_access_state()->IncreaseSPDelta(1);
2255             break;
2256           case MachineRepresentation::kFloat64:
2257             __ lay(sp, MemOperand(sp, -kDoubleSize));
2258             __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2259             frame_access_state()->IncreaseSPDelta(kDoubleSize /
2260                                                   kSystemPointerSize);
2261             break;
2262           case MachineRepresentation::kSimd128: {
2263             __ lay(sp, MemOperand(sp, -kSimd128Size));
2264             __ StoreSimd128(i.InputDoubleRegister(0), MemOperand(sp),
2265                             kScratchReg);
2266             frame_access_state()->IncreaseSPDelta(kSimd128Size /
2267                                                   kSystemPointerSize);
2268             break;
2269           }
2270           default:
2271             UNREACHABLE();
2272             break;
2273         }
2274       } else {
2275         __ Push(i.InputRegister(0));
2276         frame_access_state()->IncreaseSPDelta(1);
2277       }
2278       break;
2279     case kS390_PushFrame: {
2280       int num_slots = i.InputInt32(1);
2281       __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
2282       if (instr->InputAt(0)->IsFPRegister()) {
2283         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2284         if (op->representation() == MachineRepresentation::kFloat64) {
2285           __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2286         } else {
2287           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2288           __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
2289         }
2290       } else {
2291         __ StoreP(i.InputRegister(0), MemOperand(sp));
2292       }
2293       break;
2294     }
2295     case kS390_StoreToStackSlot: {
2296       int slot = i.InputInt32(1);
2297       if (instr->InputAt(0)->IsFPRegister()) {
2298         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2299         if (op->representation() == MachineRepresentation::kFloat64) {
2300           __ StoreDouble(i.InputDoubleRegister(0),
2301                          MemOperand(sp, slot * kSystemPointerSize));
2302         } else if (op->representation() == MachineRepresentation::kFloat32) {
2303           __ StoreFloat32(i.InputDoubleRegister(0),
2304                           MemOperand(sp, slot * kSystemPointerSize));
2305         } else {
2306           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
2307           __ StoreSimd128(i.InputDoubleRegister(0),
2308                           MemOperand(sp, slot * kSystemPointerSize),
2309                           kScratchReg);
2310         }
2311       } else {
2312         __ StoreP(i.InputRegister(0),
2313                   MemOperand(sp, slot * kSystemPointerSize));
2314       }
2315       break;
2316     }
2317     case kS390_SignExtendWord8ToInt32:
2318       __ lbr(i.OutputRegister(), i.InputRegister(0));
2319       CHECK_AND_ZERO_EXT_OUTPUT(1);
2320       break;
2321     case kS390_SignExtendWord16ToInt32:
2322       __ lhr(i.OutputRegister(), i.InputRegister(0));
2323       CHECK_AND_ZERO_EXT_OUTPUT(1);
2324       break;
2325     case kS390_SignExtendWord8ToInt64:
2326       __ lgbr(i.OutputRegister(), i.InputRegister(0));
2327       break;
2328     case kS390_SignExtendWord16ToInt64:
2329       __ lghr(i.OutputRegister(), i.InputRegister(0));
2330       break;
2331     case kS390_SignExtendWord32ToInt64:
2332       __ lgfr(i.OutputRegister(), i.InputRegister(0));
2333       break;
2334     case kS390_Uint32ToUint64:
2335       // Zero extend
2336       __ llgfr(i.OutputRegister(), i.InputRegister(0));
2337       break;
2338     case kS390_Int64ToInt32:
2339       // sign extend
2340       __ lgfr(i.OutputRegister(), i.InputRegister(0));
2341       break;
2342     // Convert Fixed to Floating Point
2343     case kS390_Int64ToFloat32:
2344       __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2345       break;
2346     case kS390_Int64ToDouble:
2347       __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2348       break;
2349     case kS390_Uint64ToFloat32:
2350       __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
2351                                      i.InputRegister(0));
2352       break;
2353     case kS390_Uint64ToDouble:
2354       __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
2355                                       i.InputRegister(0));
2356       break;
2357     case kS390_Int32ToFloat32:
2358       __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2359       break;
2360     case kS390_Int32ToDouble:
2361       __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2362       break;
2363     case kS390_Uint32ToFloat32:
2364       __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
2365                                    i.InputRegister(0));
2366       break;
2367     case kS390_Uint32ToDouble:
2368       __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
2369                                     i.InputRegister(0));
2370       break;
2371     case kS390_DoubleToInt32: {
2372       Label done;
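           // The convert instruction sets CC3 on NaN/out-of-range input.
           // Condition(0xE) covers CC0-CC2, so the branch below is taken in the
           // normal case; otherwise fall through and zero the result.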
2373       __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2374                               kRoundToNearest);
2375       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2376       __ lghi(i.OutputRegister(0), Operand::Zero());
2377       __ bind(&done);
2378       break;
2379     }
2380     case kS390_DoubleToUint32: {
2381       Label done;
2382       __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
2383                                       i.InputDoubleRegister(0));
2384       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2385       __ lghi(i.OutputRegister(0), Operand::Zero());
2386       __ bind(&done);
2387       break;
2388     }
2389     case kS390_DoubleToInt64: {
2390       Label done;
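           // When a second output is allocated it acts as a "conversion succeeded"
           // flag: preset it to 1 and clear it (or, without a flag, zero the
           // result) when CC3 indicates overflow/NaN.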
2391       if (i.OutputCount() > 1) {
2392         __ lghi(i.OutputRegister(1), Operand(1));
2393       }
2394       __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2395       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2396       if (i.OutputCount() > 1) {
2397         __ lghi(i.OutputRegister(1), Operand::Zero());
2398       } else {
2399         __ lghi(i.OutputRegister(0), Operand::Zero());
2400       }
2401       __ bind(&done);
2402       break;
2403     }
2404     case kS390_DoubleToUint64: {
2405       Label done;
2406       if (i.OutputCount() > 1) {
2407         __ lghi(i.OutputRegister(1), Operand(1));
2408       }
2409       __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
2410                                       i.InputDoubleRegister(0));
2411       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2412       if (i.OutputCount() > 1) {
2413         __ lghi(i.OutputRegister(1), Operand::Zero());
2414       } else {
2415         __ lghi(i.OutputRegister(0), Operand::Zero());
2416       }
2417       __ bind(&done);
2418       break;
2419     }
2420     case kS390_Float32ToInt32: {
2421       Label done;
2422       __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2423                                kRoundToZero);
2424       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
2425       if (set_overflow_to_min_i32) {
2426         // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
2427         // because INT32_MIN allows easier out-of-bounds detection.
2428         __ b(Condition(0xE), &done, Label::kNear);  // normal case
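             // llilh 0x8000 materializes 0x80000000 (INT32_MIN) in the low word
             // and clears the rest of the register.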
2429         __ llilh(i.OutputRegister(0), Operand(0x8000));
2430       }
2431       __ bind(&done);
2432       break;
2433     }
2434     case kS390_Float32ToUint32: {
2435       Label done;
2436       __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
2437                                        i.InputDoubleRegister(0));
2438       bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
2439       if (set_overflow_to_min_u32) {
2440         // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
2441         // because 0 allows easier out-of-bounds detection.
2442         __ b(Condition(0xE), &done, Label::kNear);  // normal case
2443         __ lghi(i.OutputRegister(0), Operand::Zero());
2444       }
2445       __ bind(&done);
2446       break;
2447     }
2448     case kS390_Float32ToUint64: {
2449       Label done;
2450       if (i.OutputCount() > 1) {
2451         __ lghi(i.OutputRegister(1), Operand(1));
2452       }
2453       __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
2454                                        i.InputDoubleRegister(0));
2455       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2456       if (i.OutputCount() > 1) {
2457         __ lghi(i.OutputRegister(1), Operand::Zero());
2458       } else {
2459         __ lghi(i.OutputRegister(0), Operand::Zero());
2460       }
2461       __ bind(&done);
2462       break;
2463     }
2464     case kS390_Float32ToInt64: {
2465       Label done;
2466       if (i.OutputCount() > 1) {
2467         __ lghi(i.OutputRegister(1), Operand(1));
2468       }
2469       __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2470       __ b(Condition(0xE), &done, Label::kNear);  // normal case
2471       if (i.OutputCount() > 1) {
2472         __ lghi(i.OutputRegister(1), Operand::Zero());
2473       } else {
2474         __ lghi(i.OutputRegister(0), Operand::Zero());
2475       }
2476       __ bind(&done);
2477       break;
2478     }
2479     case kS390_DoubleToFloat32:
2480       ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
2481       break;
2482     case kS390_Float32ToDouble:
2483       ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
2484                         nullInstr);
2485       break;
2486     case kS390_DoubleExtractLowWord32:
2487       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2488       __ llgfr(i.OutputRegister(), i.OutputRegister());
2489       break;
2490     case kS390_DoubleExtractHighWord32:
2491       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2492       __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2493       break;
2494     case kS390_DoubleInsertLowWord32:
2495       __ lgdr(kScratchReg, i.InputDoubleRegister(0));
2496       __ lr(kScratchReg, i.InputRegister(1));
2497       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2498       break;
2499     case kS390_DoubleInsertHighWord32:
2500       __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2501       __ lgdr(r0, i.InputDoubleRegister(0));
2502       __ lr(kScratchReg, r0);
2503       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2504       break;
2505     case kS390_DoubleConstruct:
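           // Assemble input 0 as the high word and input 1 as the low word of the
           // scratch GPR, then move the raw bit pattern into the FP register.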
2506       __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2507       __ lr(kScratchReg, i.InputRegister(1));
2508 
2509       // Bitwise convert from GPR to FPR
2510       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2511       break;
2512     case kS390_LoadWordS8:
2513       ASSEMBLE_LOAD_INTEGER(LoadB);
2514       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2515       break;
2516     case kS390_BitcastFloat32ToInt32:
2517       ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
2518       break;
2519     case kS390_BitcastInt32ToFloat32:
2520       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2521       break;
2522 #if V8_TARGET_ARCH_S390X
2523     case kS390_BitcastDoubleToInt64:
2524       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2525       break;
2526     case kS390_BitcastInt64ToDouble:
2527       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2528       break;
2529 #endif
2530     case kS390_LoadWordU8:
2531       ASSEMBLE_LOAD_INTEGER(LoadlB);
2532       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2533       break;
2534     case kS390_LoadWordU16:
2535       ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
2536       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2537       break;
2538     case kS390_LoadWordS16:
2539       ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
2540       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2541       break;
2542     case kS390_LoadWordU32:
2543       ASSEMBLE_LOAD_INTEGER(LoadlW);
2544       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2545       break;
2546     case kS390_LoadWordS32:
2547       ASSEMBLE_LOAD_INTEGER(LoadW);
2548       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2549       break;
2550     case kS390_LoadReverse16:
2551       ASSEMBLE_LOAD_INTEGER(lrvh);
2552       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2553       break;
2554     case kS390_LoadReverse32:
2555       ASSEMBLE_LOAD_INTEGER(lrv);
2556       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2557       break;
2558     case kS390_LoadReverse64:
2559       ASSEMBLE_LOAD_INTEGER(lrvg);
2560       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2561       break;
2562     case kS390_LoadReverse16RR:
2563       __ lrvr(i.OutputRegister(), i.InputRegister(0));
2564       __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2565       break;
2566     case kS390_LoadReverse32RR:
2567       __ lrvr(i.OutputRegister(), i.InputRegister(0));
2568       break;
2569     case kS390_LoadReverse64RR:
2570       __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2571       break;
2572     case kS390_LoadReverseSimd128RR:
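           // Byte-reverse a 128-bit vector register: extract both 64-bit lanes into
           // GPRs (the MemOperand(r0, n) operand encodes the lane index; base
           // register 0 means "no base"), byte-reverse each with lrvgr, and
           // reinsert them with the lanes swapped.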
2573       __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, 0), Condition(3));
2574       __ vlgv(r1, i.InputSimd128Register(0), MemOperand(r0, 1), Condition(3));
2575       __ lrvgr(r0, r0);
2576       __ lrvgr(r1, r1);
2577       __ vlvg(i.OutputSimd128Register(), r0, MemOperand(r0, 1), Condition(3));
2578       __ vlvg(i.OutputSimd128Register(), r1, MemOperand(r0, 0), Condition(3));
2579       break;
2580     case kS390_LoadReverseSimd128: {
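           // With VECTOR_ENHANCE_FACILITY_2 a single vlbr performs the
           // byte-reversed load. Otherwise load the two 64-bit halves byte-reversed
           // with lrvg (the second at the next doubleword; kBitsPerByte is
           // numerically that 8-byte offset) and pack them in swapped order.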
2581       AddressingMode mode = kMode_None;
2582       MemOperand operand = i.MemoryOperand(&mode);
2583       if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2584           is_uint12(operand.offset())) {
2585         __ vlbr(i.OutputSimd128Register(), operand, Condition(4));
2586       } else {
2587         __ lrvg(r0, operand);
2588         __ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
2589                                operand.offset() + kBitsPerByte));
2590         __ vlvgp(i.OutputSimd128Register(), r1, r0);
2591       }
2592       break;
2593     }
2594     case kS390_LoadWord64:
2595       ASSEMBLE_LOAD_INTEGER(lg);
2596       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2597       break;
2598     case kS390_LoadAndTestWord32: {
2599       ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2600       break;
2601     }
2602     case kS390_LoadAndTestWord64: {
2603       ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2604       break;
2605     }
2606     case kS390_LoadFloat32:
2607       ASSEMBLE_LOAD_FLOAT(LoadFloat32);
2608       break;
2609     case kS390_LoadDouble:
2610       ASSEMBLE_LOAD_FLOAT(LoadDouble);
2611       break;
2612     case kS390_LoadSimd128: {
2613       AddressingMode mode = kMode_None;
2614       MemOperand operand = i.MemoryOperand(&mode);
2615       __ vl(i.OutputSimd128Register(), operand, Condition(0));
2616       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2617       break;
2618     }
2619     case kS390_StoreWord8:
2620       ASSEMBLE_STORE_INTEGER(StoreByte);
2621       break;
2622     case kS390_StoreWord16:
2623       ASSEMBLE_STORE_INTEGER(StoreHalfWord);
2624       break;
2625     case kS390_StoreWord32:
2626       ASSEMBLE_STORE_INTEGER(StoreW);
2627       break;
2628 #if V8_TARGET_ARCH_S390X
2629     case kS390_StoreWord64:
2630       ASSEMBLE_STORE_INTEGER(StoreP);
2631       break;
2632 #endif
2633     case kS390_StoreReverse16:
2634       ASSEMBLE_STORE_INTEGER(strvh);
2635       break;
2636     case kS390_StoreReverse32:
2637       ASSEMBLE_STORE_INTEGER(strv);
2638       break;
2639     case kS390_StoreReverse64:
2640       ASSEMBLE_STORE_INTEGER(strvg);
2641       break;
2642     case kS390_StoreReverseSimd128: {
2643       size_t index = 0;
2644       AddressingMode mode = kMode_None;
2645       MemOperand operand = i.MemoryOperand(&mode, &index);
2646       if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2647           is_uint12(operand.offset())) {
2648         __ vstbr(i.InputSimd128Register(index), operand, Condition(4));
2649       } else {
2650         __ vlgv(r0, i.InputSimd128Register(index), MemOperand(r0, 1),
2651                 Condition(3));
2652         __ vlgv(r1, i.InputSimd128Register(index), MemOperand(r0, 0),
2653                 Condition(3));
2654         __ strvg(r0, operand);
2655         __ strvg(r1, MemOperand(operand.rx(), operand.rb(),
2656                                 operand.offset() + kBitsPerByte));
2657       }
2658       break;
2659     }
2660     case kS390_StoreFloat32:
2661       ASSEMBLE_STORE_FLOAT32();
2662       break;
2663     case kS390_StoreDouble:
2664       ASSEMBLE_STORE_DOUBLE();
2665       break;
2666     case kS390_StoreSimd128: {
2667       size_t index = 0;
2668       AddressingMode mode = kMode_None;
2669       MemOperand operand = i.MemoryOperand(&mode, &index);
2670       __ vst(i.InputSimd128Register(index), operand, Condition(0));
2671       break;
2672     }
2673     case kS390_Lay:
2674       __ lay(i.OutputRegister(), i.MemoryOperand());
2675       break;
2676 //         0x aa bb cc dd
2677 // index =    3..2..1..0
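// Emulates a sub-word atomic exchange: load the containing aligned 32-bit word
// (the negative offset steps r1 back to the word boundary), rotate the new value
// into bit range [start, end] of a copy, and retry the compare-and-swap (csy)
// until no other writer intervened; finally shift the old field to the low bits.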
2678 #define ATOMIC_EXCHANGE(start, end, shift_amount, offset)              \
2679   {                                                                    \
2680     Label do_cs;                                                       \
2681     __ LoadlW(output, MemOperand(r1, offset));                         \
2682     __ bind(&do_cs);                                                   \
2683     __ llgfr(r0, output);                                              \
2684     __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
2685                               Operand(shift_amount), false);           \
2686     __ csy(output, r0, MemOperand(r1, offset));                        \
2687     __ bne(&do_cs, Label::kNear);                                      \
2688     __ srl(output, Operand(shift_amount));                             \
2689   }
2690 #ifdef V8_TARGET_BIG_ENDIAN
2691 #define ATOMIC_EXCHANGE_BYTE(i)                                  \
2692   {                                                              \
2693     constexpr int idx = (i);                                     \
2694     static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2695     constexpr int start = 32 + 8 * idx;                          \
2696     constexpr int end = start + 7;                               \
2697     constexpr int shift_amount = (3 - idx) * 8;                  \
2698     ATOMIC_EXCHANGE(start, end, shift_amount, -idx);             \
2699   }
2700 #define ATOMIC_EXCHANGE_HALFWORD(i)                              \
2701   {                                                              \
2702     constexpr int idx = (i);                                     \
2703     static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2704     constexpr int start = 32 + 16 * idx;                         \
2705     constexpr int end = start + 15;                              \
2706     constexpr int shift_amount = (1 - idx) * 16;                 \
2707     ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2);         \
2708   }
2709 #else
2710 #define ATOMIC_EXCHANGE_BYTE(i)                                  \
2711   {                                                              \
2712     constexpr int idx = (i);                                     \
2713     static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2714     constexpr int start = 32 + 8 * (3 - idx);                    \
2715     constexpr int end = start + 7;                               \
2716     constexpr int shift_amount = idx * 8;                        \
2717     ATOMIC_EXCHANGE(start, end, shift_amount, -idx);             \
2718   }
2719 #define ATOMIC_EXCHANGE_HALFWORD(i)                              \
2720   {                                                              \
2721     constexpr int idx = (i);                                     \
2722     static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2723     constexpr int start = 32 + 16 * (1 - idx);                   \
2724     constexpr int end = start + 15;                              \
2725     constexpr int shift_amount = idx * 16;                       \
2726     ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2);         \
2727   }
2728 #endif
2729     case kS390_Word64AtomicExchangeUint8:
2730     case kWord32AtomicExchangeInt8:
2731     case kWord32AtomicExchangeUint8: {
2732       Register base = i.InputRegister(0);
2733       Register index = i.InputRegister(1);
2734       Register value = i.InputRegister(2);
2735       Register output = i.OutputRegister();
2736       Label three, two, one, done;
2737       __ la(r1, MemOperand(base, index));
2738       __ tmll(r1, Operand(3));
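           // tmll sets the CC from the two low-order address bits: CC0 for 0b00
           // (fall through), CC1 for 0b01, CC2 for 0b10, CC3 for 0b11; the branch
           // masks below (1, 2, 4) select CC3, CC2 and CC1 respectively.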
2739       __ b(Condition(1), &three);
2740       __ b(Condition(2), &two);
2741       __ b(Condition(4), &one);
2742 
2743       // end with 0b00
2744       ATOMIC_EXCHANGE_BYTE(0);
2745       __ b(&done);
2746 
2747       // ending with 0b01
2748       __ bind(&one);
2749       ATOMIC_EXCHANGE_BYTE(1);
2750       __ b(&done);
2751 
2752       // ending with 0b10
2753       __ bind(&two);
2754       ATOMIC_EXCHANGE_BYTE(2);
2755       __ b(&done);
2756 
2757       // ending with 0b11
2758       __ bind(&three);
2759       ATOMIC_EXCHANGE_BYTE(3);
2760 
2761       __ bind(&done);
2762       if (opcode == kWord32AtomicExchangeInt8) {
2763         __ lgbr(output, output);
2764       } else {
2765         __ llgcr(output, output);
2766       }
2767       break;
2768     }
2769     case kS390_Word64AtomicExchangeUint16:
2770     case kWord32AtomicExchangeInt16:
2771     case kWord32AtomicExchangeUint16: {
2772       Register base = i.InputRegister(0);
2773       Register index = i.InputRegister(1);
2774       Register value = i.InputRegister(2);
2775       Register output = i.OutputRegister();
2776       Label two, done;
2777       __ la(r1, MemOperand(base, index));
2778       __ tmll(r1, Operand(3));
2779       __ b(Condition(2), &two);
2780 
2781       // end with 0b00
2782       ATOMIC_EXCHANGE_HALFWORD(0);
2783       __ b(&done);
2784 
2785       // ending with 0b10
2786       __ bind(&two);
2787       ATOMIC_EXCHANGE_HALFWORD(1);
2788 
2789       __ bind(&done);
2790       if (opcode == kWord32AtomicExchangeInt16) {
2791         __ lghr(output, output);
2792       } else {
2793         __ llghr(output, output);
2794       }
2795       break;
2796     }
2797     case kS390_Word64AtomicExchangeUint32:
2798     case kWord32AtomicExchangeWord32: {
2799       Register base = i.InputRegister(0);
2800       Register index = i.InputRegister(1);
2801       Register value = i.InputRegister(2);
2802       Register output = i.OutputRegister();
2803       Label do_cs;
2804       __ lay(r1, MemOperand(base, index));
2805       __ LoadlW(output, MemOperand(r1));
2806       __ bind(&do_cs);
2807       __ cs(output, value, MemOperand(r1));
2808       __ bne(&do_cs, Label::kNear);
2809       break;
2810     }
2811     case kWord32AtomicCompareExchangeInt8:
2812       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadB);
2813       break;
2814     case kS390_Word64AtomicCompareExchangeUint8:
2815     case kWord32AtomicCompareExchangeUint8:
2816       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadlB);
2817       break;
2818     case kWord32AtomicCompareExchangeInt16:
2819       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadHalfWordP);
2820       break;
2821     case kS390_Word64AtomicCompareExchangeUint16:
2822     case kWord32AtomicCompareExchangeUint16:
2823       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
2824       break;
2825     case kS390_Word64AtomicCompareExchangeUint32:
2826     case kWord32AtomicCompareExchangeWord32:
2827       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
2828       break;
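// Sub-word atomic binops: the signed Int8/Int16 cases shift the previous word so
// the updated field lands in the low bits and then sign-extend it; the unsigned
// cases rotate the field into place with RISBG and zero the remaining bits.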
2829 #define ATOMIC_BINOP_CASE(op, inst)                                          \
2830   case kWord32Atomic##op##Int8:                                              \
2831     ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() {                                 \
2832       intptr_t shift_right = static_cast<intptr_t>(shift_amount);            \
2833       __ srlk(result, prev, Operand(shift_right));                           \
2834       __ LoadB(result, result);                                              \
2835     });                                                                      \
2836     break;                                                                   \
2837   case kS390_Word64Atomic##op##Uint8:                                        \
2838   case kWord32Atomic##op##Uint8:                                             \
2839     ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() {                                 \
2840       int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount;           \
2841       __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63),      \
2842                                 Operand(static_cast<intptr_t>(rotate_left)), \
2843                                 true);                                       \
2844     });                                                                      \
2845     break;                                                                   \
2846   case kWord32Atomic##op##Int16:                                             \
2847     ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() {                             \
2848       intptr_t shift_right = static_cast<intptr_t>(shift_amount);            \
2849       __ srlk(result, prev, Operand(shift_right));                           \
2850       __ LoadHalfWordP(result, result);                                      \
2851     });                                                                      \
2852     break;                                                                   \
2853   case kS390_Word64Atomic##op##Uint16:                                       \
2854   case kWord32Atomic##op##Uint16:                                            \
2855     ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() {                             \
2856       int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount;           \
2857       __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63),      \
2858                                 Operand(static_cast<intptr_t>(rotate_left)), \
2859                                 true);                                       \
2860     });                                                                      \
2861     break;
2862       ATOMIC_BINOP_CASE(Add, Add32)
2863       ATOMIC_BINOP_CASE(Sub, Sub32)
2864       ATOMIC_BINOP_CASE(And, And)
2865       ATOMIC_BINOP_CASE(Or, Or)
2866       ATOMIC_BINOP_CASE(Xor, Xor)
2867 #undef ATOMIC_BINOP_CASE
2868     case kS390_Word64AtomicAddUint32:
2869     case kWord32AtomicAddWord32:
2870       ASSEMBLE_ATOMIC_BINOP_WORD(laa);
2871       break;
2872     case kS390_Word64AtomicSubUint32:
2873     case kWord32AtomicSubWord32:
2874       ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
2875       break;
2876     case kS390_Word64AtomicAndUint32:
2877     case kWord32AtomicAndWord32:
2878       ASSEMBLE_ATOMIC_BINOP_WORD(lan);
2879       break;
2880     case kS390_Word64AtomicOrUint32:
2881     case kWord32AtomicOrWord32:
2882       ASSEMBLE_ATOMIC_BINOP_WORD(lao);
2883       break;
2884     case kS390_Word64AtomicXorUint32:
2885     case kWord32AtomicXorWord32:
2886       ASSEMBLE_ATOMIC_BINOP_WORD(lax);
2887       break;
2888     case kS390_Word64AtomicAddUint64:
2889       ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
2890       break;
2891     case kS390_Word64AtomicSubUint64:
2892       ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64);
2893       break;
2894     case kS390_Word64AtomicAndUint64:
2895       ASSEMBLE_ATOMIC_BINOP_WORD64(lang);
2896       break;
2897     case kS390_Word64AtomicOrUint64:
2898       ASSEMBLE_ATOMIC_BINOP_WORD64(laog);
2899       break;
2900     case kS390_Word64AtomicXorUint64:
2901       ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
2902       break;
2903     case kS390_Word64AtomicExchangeUint64: {
2904       Register base = i.InputRegister(0);
2905       Register index = i.InputRegister(1);
2906       Register value = i.InputRegister(2);
2907       Register output = i.OutputRegister();
2908       Label do_cs;
2909       __ la(r1, MemOperand(base, index));
2910       __ lg(output, MemOperand(r1));
2911       __ bind(&do_cs);
2912       __ csg(output, value, MemOperand(r1));
2913       __ bne(&do_cs, Label::kNear);
2914       break;
2915     }
2916     case kS390_Word64AtomicCompareExchangeUint64:
2917       ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
2918       break;
2919       // vector replicate element
2920     case kS390_F64x2Splat: {
2921       __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
2922               Condition(3));
2923       break;
2924     }
2925     case kS390_F32x4Splat: {
2926 #ifdef V8_TARGET_BIG_ENDIAN
2927       __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
2928               Condition(2));
2929 #else
2930       __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(1),
2931               Condition(2));
2932 #endif
2933       break;
2934     }
2935     case kS390_I64x2Splat: {
2936       Simd128Register dst = i.OutputSimd128Register();
2937       __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(3));
2938       __ vrep(dst, dst, Operand(0), Condition(3));
2939       break;
2940     }
2941     case kS390_I32x4Splat: {
2942       Simd128Register dst = i.OutputSimd128Register();
2943       __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(2));
2944       __ vrep(dst, dst, Operand(0), Condition(2));
2945       break;
2946     }
2947     case kS390_I16x8Splat: {
2948       Simd128Register dst = i.OutputSimd128Register();
2949       __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(1));
2950       __ vrep(dst, dst, Operand(0), Condition(1));
2951       break;
2952     }
2953     case kS390_I8x16Splat: {
2954       Simd128Register dst = i.OutputSimd128Register();
2955       __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(0));
2956       __ vrep(dst, dst, Operand(0), Condition(0));
2957       break;
2958     }
2959     // vector extract element
2960     case kS390_F64x2ExtractLane: {
2961 #ifdef V8_TARGET_BIG_ENDIAN
2962       __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
2963               Operand(1 - i.InputInt8(1)), Condition(3));
2964 #else
2965       __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
2966               Operand(i.InputInt8(1)), Condition(3));
2967 #endif
2968       break;
2969     }
2970     case kS390_F32x4ExtractLane: {
2971 #ifdef V8_TARGET_BIG_ENDIAN
2972       __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
2973               Operand(3 - i.InputInt8(1)), Condition(2));
2974 #else
2975       __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
2976               Operand(i.InputInt8(1)), Condition(2));
2977 #endif
2978       break;
2979     }
2980     case kS390_I64x2ExtractLane: {
2981 #ifdef V8_TARGET_BIG_ENDIAN
2982       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
2983               MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
2984 #else
2985       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
2986               MemOperand(r0, i.InputInt8(1)), Condition(3));
2987 #endif
2988       break;
2989     }
2990     case kS390_I32x4ExtractLane: {
2991 #ifdef V8_TARGET_BIG_ENDIAN
2992       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
2993               MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
2994 #else
2995       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
2996               MemOperand(r0, i.InputInt8(1)), Condition(2));
2997 #endif
2998       break;
2999     }
3000     case kS390_I16x8ExtractLaneU: {
3001 #ifdef V8_TARGET_BIG_ENDIAN
3002       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
3003               MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
3004 #else
3005       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
3006               MemOperand(r0, i.InputInt8(1)), Condition(1));
3007 #endif
3008       break;
3009     }
3010     case kS390_I16x8ExtractLaneS: {
3011 #ifdef V8_TARGET_BIG_ENDIAN
3012       __ vlgv(kScratchReg, i.InputSimd128Register(0),
3013               MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
3014 #else
3015       __ vlgv(kScratchReg, i.InputSimd128Register(0),
3016               MemOperand(r0, i.InputInt8(1)), Condition(1));
3017 #endif
3018       __ lghr(i.OutputRegister(), kScratchReg);
3019       break;
3020     }
3021     case kS390_I8x16ExtractLaneU: {
3022 #ifdef V8_TARGET_BIG_ENDIAN
3023       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
3024               MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
3025 #else
3026       __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
3027               MemOperand(r0, i.InputInt8(1)), Condition(0));
3028 #endif
3029       break;
3030     }
3031     case kS390_I8x16ExtractLaneS: {
3032 #ifdef V8_TARGET_BIG_ENDIAN
3033       __ vlgv(kScratchReg, i.InputSimd128Register(0),
3034               MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
3035 #else
3036       __ vlgv(kScratchReg, i.InputSimd128Register(0),
3037               MemOperand(r0, i.InputInt8(1)), Condition(0));
3038 #endif
3039       __ lgbr(i.OutputRegister(), kScratchReg);
3040       break;
3041     }
3042     // vector replace element
3043     case kS390_F64x2ReplaceLane: {
3044       Simd128Register src = i.InputSimd128Register(0);
3045       Simd128Register dst = i.OutputSimd128Register();
3046       __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
3047       __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
3048               Condition(3));
3049 #ifdef V8_TARGET_BIG_ENDIAN
3050       __ vlvg(kScratchDoubleReg, kScratchReg,
3051               MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
3052 #else
3053       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
3054               Condition(3));
3055 #endif
3056       __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
3057       break;
3058     }
3059     case kS390_F32x4ReplaceLane: {
3060       Simd128Register src = i.InputSimd128Register(0);
3061       Simd128Register dst = i.OutputSimd128Register();
3062       __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
3063 #ifdef V8_TARGET_BIG_ENDIAN
3064       __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
3065               Condition(2));
3066       __ vlvg(kScratchDoubleReg, kScratchReg,
3067               MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
3068 #else
3069       __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 1),
3070               Condition(2));
3071       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
3072               Condition(2));
3073 #endif
3074       __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
3075       break;
3076     }
3077     case kS390_I64x2ReplaceLane: {
3078       Simd128Register src = i.InputSimd128Register(0);
3079       Simd128Register dst = i.OutputSimd128Register();
3080       if (src != dst) {
3081         __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
3082       }
3083 #ifdef V8_TARGET_BIG_ENDIAN
3084       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3085               MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
3086 #else
3087       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3088               MemOperand(r0, i.InputInt8(1)), Condition(3));
3089 #endif
3090       break;
3091     }
3092     case kS390_I32x4ReplaceLane: {
3093       Simd128Register src = i.InputSimd128Register(0);
3094       Simd128Register dst = i.OutputSimd128Register();
3095       if (src != dst) {
3096         __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
3097       }
3098 #ifdef V8_TARGET_BIG_ENDIAN
3099       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3100               MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
3101 #else
3102       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3103               MemOperand(r0, i.InputInt8(1)), Condition(2));
3104 #endif
3105       break;
3106     }
3107     case kS390_I16x8ReplaceLane: {
3108       Simd128Register src = i.InputSimd128Register(0);
3109       Simd128Register dst = i.OutputSimd128Register();
3110       if (src != dst) {
3111         __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
3112       }
3113 #ifdef V8_TARGET_BIG_ENDIAN
3114       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3115               MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
3116 #else
3117       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3118               MemOperand(r0, i.InputInt8(1)), Condition(1));
3119 #endif
3120       break;
3121     }
3122     case kS390_I8x16ReplaceLane: {
3123       Simd128Register src = i.InputSimd128Register(0);
3124       Simd128Register dst = i.OutputSimd128Register();
3125       if (src != dst) {
3126         __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
3127       }
3128 #ifdef V8_TARGET_BIG_ENDIAN
3129       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3130               MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
3131 #else
3132       __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
3133               MemOperand(r0, i.InputInt8(1)), Condition(0));
3134 #endif
3135       break;
3136     }
3137     // vector binops
3138     case kS390_F64x2Add: {
3139       __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
3140              i.InputSimd128Register(1), Condition(0), Condition(0),
3141              Condition(3));
3142       break;
3143     }
3144     case kS390_F64x2Sub: {
3145       __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3146              i.InputSimd128Register(1), Condition(0), Condition(0),
3147              Condition(3));
3148       break;
3149     }
3150     case kS390_F64x2Mul: {
3151       __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0),
3152              i.InputSimd128Register(1), Condition(0), Condition(0),
3153              Condition(3));
3154       break;
3155     }
3156     case kS390_F64x2Div: {
3157       __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
3158              i.InputSimd128Register(1), Condition(0), Condition(0),
3159              Condition(3));
3160       break;
3161     }
3162     case kS390_F64x2Min: {
3163       __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
3164                i.InputSimd128Register(1), Condition(1), Condition(0),
3165                Condition(3));
3166       break;
3167     }
3168     case kS390_F64x2Max: {
3169       __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
3170                i.InputSimd128Register(1), Condition(1), Condition(0),
3171                Condition(3));
3172       break;
3173     }
3174     case kS390_F64x2Qfma: {
3175       Simd128Register src0 = i.InputSimd128Register(0);
3176       Simd128Register src1 = i.InputSimd128Register(1);
3177       Simd128Register src2 = i.InputSimd128Register(2);
3178       Simd128Register dst = i.OutputSimd128Register();
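           // vfma is expected to emit VFMA (dst = src1 * src2 + addend), so input 0
           // is the addend and inputs 1 and 2 are the factors; Qfms below uses
           // vfnms to compute src0 - src1 * src2.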
3179       __ vfma(dst, src1, src2, src0, Condition(3), Condition(0));
3180       break;
3181     }
3182     case kS390_F64x2Qfms: {
3183       Simd128Register src0 = i.InputSimd128Register(0);
3184       Simd128Register src1 = i.InputSimd128Register(1);
3185       Simd128Register src2 = i.InputSimd128Register(2);
3186       Simd128Register dst = i.OutputSimd128Register();
3187       __ vfnms(dst, src1, src2, src0, Condition(3), Condition(0));
3188       break;
3189     }
3190     case kS390_F32x4Add: {
3191       __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
3192              i.InputSimd128Register(1), Condition(0), Condition(0),
3193              Condition(2));
3194       break;
3195     }
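// Horizontal add of adjacent f32 lanes: vpk gathers the low word of each 64-bit
// lane pair, vesrl/vpk gather the high words, and vfa adds the two partial
// vectors; argument order differs between big- and little-endian lane layouts.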
3196 #define FLOAT_ADD_HORIZ(src0, src1, scratch0, scratch1, add0, add1)         \
3197   __ vpk(dst, src0, src1, Condition(0), Condition(0), Condition(3));        \
3198   __ vesrl(scratch0, src0, MemOperand(r0, shift_bits), Condition(3));       \
3199   __ vesrl(scratch1, src1, MemOperand(r0, shift_bits), Condition(3));       \
3200   __ vpk(kScratchDoubleReg, scratch0, scratch1, Condition(0), Condition(0), \
3201          Condition(3));                                                     \
3202   __ vfa(dst, add0, add1, Condition(0), Condition(0), Condition(2));
3203     case kS390_F32x4AddHoriz: {
3204       Simd128Register src0 = i.InputSimd128Register(0);
3205       Simd128Register src1 = i.InputSimd128Register(1);
3206       Simd128Register dst = i.OutputSimd128Register();
3207       DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
3208       DoubleRegister tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
3209       constexpr int shift_bits = 32;
3210 #ifdef V8_TARGET_BIG_ENDIAN
3211       FLOAT_ADD_HORIZ(src1, src0, tempFPReg2, tempFPReg1, kScratchDoubleReg,
3212                       dst)
3213 #else
3214       FLOAT_ADD_HORIZ(src0, src1, tempFPReg1, tempFPReg2, dst,
3215                       kScratchDoubleReg)
3216 #endif
3217 #undef FLOAT_ADD_HORIZ
3218       break;
3219     }
3220     case kS390_F32x4Sub: {
3221       __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3222              i.InputSimd128Register(1), Condition(0), Condition(0),
3223              Condition(2));
3224       break;
3225     }
3226     case kS390_F32x4Mul: {
3227       __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0),
3228              i.InputSimd128Register(1), Condition(0), Condition(0),
3229              Condition(2));
3230       break;
3231     }
3232     case kS390_F32x4Div: {
3233       __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
3234              i.InputSimd128Register(1), Condition(0), Condition(0),
3235              Condition(2));
3236       break;
3237     }
3238     case kS390_F32x4Min: {
3239       __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
3240                i.InputSimd128Register(1), Condition(1), Condition(0),
3241                Condition(2));
3242       break;
3243     }
3244     case kS390_F32x4Max: {
3245       __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
3246                i.InputSimd128Register(1), Condition(1), Condition(0),
3247                Condition(2));
3248       break;
3249     }
3250     case kS390_F32x4Qfma: {
3251       Simd128Register src0 = i.InputSimd128Register(0);
3252       Simd128Register src1 = i.InputSimd128Register(1);
3253       Simd128Register src2 = i.InputSimd128Register(2);
3254       Simd128Register dst = i.OutputSimd128Register();
3255       __ vfma(dst, src1, src2, src0, Condition(2), Condition(0));
3256       break;
3257     }
3258     case kS390_F32x4Qfms: {
3259       Simd128Register src0 = i.InputSimd128Register(0);
3260       Simd128Register src1 = i.InputSimd128Register(1);
3261       Simd128Register src2 = i.InputSimd128Register(2);
3262       Simd128Register dst = i.OutputSimd128Register();
3263       __ vfnms(dst, src1, src2, src0, Condition(2), Condition(0));
3264       break;
3265     }
3266     case kS390_I64x2Add: {
3267       __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
3268             i.InputSimd128Register(1), Condition(0), Condition(0),
3269             Condition(3));
3270       break;
3271     }
3272     case kS390_I64x2Sub: {
3273       __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3274             i.InputSimd128Register(1), Condition(0), Condition(0),
3275             Condition(3));
3276       break;
3277     }
3278     case kS390_I64x2Mul: {
3279       Simd128Register src0 = i.InputSimd128Register(0);
3280       Simd128Register src1 = i.InputSimd128Register(1);
3281       Register scratch_0 = r0;
3282       Register scratch_1 = r1;
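           // The first iteration multiplies lane 0 into r0 (via r0/r1); switching
           // the scratch registers to r1/ip makes the second iteration leave lane
           // 1's product in r1, so vlvgp can pack r0 and r1 into the result.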
3283       for (int i = 0; i < 2; i++) {
3284         __ vlgv(scratch_0, src0, MemOperand(r0, i), Condition(3));
3285         __ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
3286         __ Mul64(scratch_0, scratch_1);
3287         scratch_0 = r1;
3288         scratch_1 = ip;
3289       }
3290       __ vlvgp(i.OutputSimd128Register(), r0, r1);
3291       break;
3292     }
3293     case kS390_I32x4Add: {
3294       __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
3295             i.InputSimd128Register(1), Condition(0), Condition(0),
3296             Condition(2));
3297       break;
3298     }
3299     case kS390_I32x4AddHoriz: {
3300       Simd128Register src0 = i.InputSimd128Register(0);
3301       Simd128Register src1 = i.InputSimd128Register(1);
3302       Simd128Register dst = i.OutputSimd128Register();
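           // Zero the scratch vector (x - x), use vsumg to sum adjacent 32-bit
           // lanes of each operand into 64-bit lanes, then pack the two partial-sum
           // vectors back into 32-bit lanes (operand order depends on lane
           // numbering).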
3303       __ vs(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
3304             Condition(0), Condition(0), Condition(2));
3305       __ vsumg(dst, src0, kScratchDoubleReg, Condition(0), Condition(0),
3306                Condition(2));
3307       __ vsumg(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
3308                Condition(0), Condition(2));
3309 #ifdef V8_TARGET_BIG_ENDIAN
3310       __ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
3311              Condition(3));
3312 #else
3313       __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
3314              Condition(3));
3315 #endif
3316       break;
3317     }
3318     case kS390_I32x4Sub: {
3319       __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3320             i.InputSimd128Register(1), Condition(0), Condition(0),
3321             Condition(2));
3322       break;
3323     }
3324     case kS390_I32x4Mul: {
3325       __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
3326              i.InputSimd128Register(1), Condition(0), Condition(0),
3327              Condition(2));
3328       break;
3329     }
3330     case kS390_I16x8Add: {
3331       __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
3332             i.InputSimd128Register(1), Condition(0), Condition(0),
3333             Condition(1));
3334       break;
3335     }
3336     case kS390_I16x8AddHoriz: {
3337       Simd128Register src0 = i.InputSimd128Register(0);
3338       Simd128Register src1 = i.InputSimd128Register(1);
3339       Simd128Register dst = i.OutputSimd128Register();
3340       __ vs(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
3341             Condition(0), Condition(0), Condition(1));
3342       __ vsum(dst, src0, kScratchDoubleReg, Condition(0), Condition(0),
3343               Condition(1));
3344       __ vsum(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
3345               Condition(0), Condition(1));
3346 #ifdef V8_TARGET_BIG_ENDIAN
3347       __ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
3348              Condition(2));
3349 #else
3350       __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
3351              Condition(2));
3352 #endif
3353       break;
3354     }
3355     case kS390_I16x8Sub: {
3356       __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3357             i.InputSimd128Register(1), Condition(0), Condition(0),
3358             Condition(1));
3359       break;
3360     }
3361     case kS390_I16x8Mul: {
3362       __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
3363              i.InputSimd128Register(1), Condition(0), Condition(0),
3364              Condition(1));
3365       break;
3366     }
3367     case kS390_I8x16Add: {
3368       __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
3369             i.InputSimd128Register(1), Condition(0), Condition(0),
3370             Condition(0));
3371       break;
3372     }
3373     case kS390_I8x16Sub: {
3374       __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
3375             i.InputSimd128Register(1), Condition(0), Condition(0),
3376             Condition(0));
3377       break;
3378     }
3379     case kS390_I8x16Mul: {
3380       __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
3381              i.InputSimd128Register(1), Condition(0), Condition(0),
3382              Condition(0));
3383       break;
3384     }
3385     case kS390_I16x8RoundingAverageU: {
3386       __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3387                i.InputSimd128Register(1), Condition(0), Condition(0),
3388                Condition(1));
3389       break;
3390     }
3391     case kS390_I8x16RoundingAverageU: {
3392       __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3393                i.InputSimd128Register(1), Condition(0), Condition(0),
3394                Condition(0));
3395       break;
3396     }
3397     // vector comparisons
3398     case kS390_F64x2Eq: {
3399       __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
3400               i.InputSimd128Register(1), Condition(0), Condition(0),
3401               Condition(3));
3402       break;
3403     }
3404     case kS390_F64x2Ne: {
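           // Compute the equality mask, then invert it: vno of a value with itself
           // is a bitwise NOT, yielding the not-equal mask.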
3405       __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
3406               i.InputSimd128Register(1), Condition(0), Condition(0),
3407               Condition(3));
3408       __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
3409              Condition(0), Condition(0), Condition(3));
3410       break;
3411     }
3412     case kS390_F64x2Le: {
3413       __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1),
3414                i.InputSimd128Register(0), Condition(0), Condition(0),
3415                Condition(3));
3416       break;
3417     }
3418     case kS390_F64x2Lt: {
3419       __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1),
3420               i.InputSimd128Register(0), Condition(0), Condition(0),
3421               Condition(3));
3422       break;
3423     }
3424     case kS390_I32x4MinS: {
3425       __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
3426              i.InputSimd128Register(1), Condition(0), Condition(0),
3427              Condition(2));
3428       break;
3429     }
3430     case kS390_I32x4MinU: {
3431       __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3432               i.InputSimd128Register(1), Condition(0), Condition(0),
3433               Condition(2));
3434       break;
3435     }
3436     case kS390_I16x8MinS: {
3437       __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
3438              i.InputSimd128Register(1), Condition(0), Condition(0),
3439              Condition(1));
3440       break;
3441     }
3442     case kS390_I16x8MinU: {
3443       __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3444               i.InputSimd128Register(1), Condition(0), Condition(0),
3445               Condition(1));
3446       break;
3447     }
3448     case kS390_I8x16MinS: {
3449       __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
3450              i.InputSimd128Register(1), Condition(0), Condition(0),
3451              Condition(0));
3452       break;
3453     }
3454     case kS390_I8x16MinU: {
3455       __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3456               i.InputSimd128Register(1), Condition(0), Condition(0),
3457               Condition(0));
3458       break;
3459     }
3460     case kS390_I32x4MaxS: {
3461       __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
3462              i.InputSimd128Register(1), Condition(0), Condition(0),
3463              Condition(2));
3464       break;
3465     }
3466     case kS390_I32x4MaxU: {
3467       __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3468               i.InputSimd128Register(1), Condition(0), Condition(0),
3469               Condition(2));
3470       break;
3471     }
3472     case kS390_I16x8MaxS: {
3473       __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
3474              i.InputSimd128Register(1), Condition(0), Condition(0),
3475              Condition(1));
3476       break;
3477     }
3478     case kS390_I16x8MaxU: {
3479       __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3480               i.InputSimd128Register(1), Condition(0), Condition(0),
3481               Condition(1));
3482       break;
3483     }
3484     case kS390_I8x16MaxS: {
3485       __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
3486              i.InputSimd128Register(1), Condition(0), Condition(0),
3487              Condition(0));
3488       break;
3489     }
3490     case kS390_I8x16MaxU: {
3491       __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3492               i.InputSimd128Register(1), Condition(0), Condition(0),
3493               Condition(0));
3494       break;
3495     }
3496     case kS390_F32x4Eq: {
3497       __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
3498               i.InputSimd128Register(1), Condition(0), Condition(0),
3499               Condition(2));
3500       break;
3501     }
3502     case kS390_I64x2Eq: {
3503       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3504               i.InputSimd128Register(1), Condition(0), Condition(3));
3505       break;
3506     }
3507     case kS390_I32x4Eq: {
3508       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3509               i.InputSimd128Register(1), Condition(0), Condition(2));
3510       break;
3511     }
3512     case kS390_I16x8Eq: {
3513       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3514               i.InputSimd128Register(1), Condition(0), Condition(1));
3515       break;
3516     }
3517     case kS390_I8x16Eq: {
3518       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3519               i.InputSimd128Register(1), Condition(0), Condition(0));
3520       break;
3521     }
3522     case kS390_F32x4Ne: {
3523       __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
3524               i.InputSimd128Register(1), Condition(0), Condition(0),
3525               Condition(2));
3526       __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
3527              Condition(0), Condition(0), Condition(2));
3528       break;
3529     }
3530     case kS390_I32x4Ne: {
3531       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3532               i.InputSimd128Register(1), Condition(0), Condition(2));
3533       __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
3534              i.OutputSimd128Register(), Condition(0), Condition(0),
3535              Condition(2));
3536       break;
3537     }
3538     case kS390_I16x8Ne: {
3539       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3540               i.InputSimd128Register(1), Condition(0), Condition(1));
3541       __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
3542              i.OutputSimd128Register(), Condition(0), Condition(0),
3543              Condition(1));
3544       break;
3545     }
3546     case kS390_I8x16Ne: {
3547       __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3548               i.InputSimd128Register(1), Condition(0), Condition(0));
3549       __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
3550              i.OutputSimd128Register(), Condition(0), Condition(0),
3551              Condition(0));
3552       break;
3553     }
3554     case kS390_F32x4Lt: {
3555       __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1),
3556               i.InputSimd128Register(0), Condition(0), Condition(0),
3557               Condition(2));
3558       break;
3559     }
3560     case kS390_F32x4Le: {
3561       __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1),
3562                i.InputSimd128Register(0), Condition(0), Condition(0),
3563                Condition(2));
3564       break;
3565     }
3566     case kS390_I32x4GtS: {
3567       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3568              i.InputSimd128Register(1), Condition(0), Condition(2));
3569       break;
3570     }
3571     case kS390_I32x4GeS: {
3572       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3573               i.InputSimd128Register(1), Condition(0), Condition(2));
3574       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3575              i.InputSimd128Register(1), Condition(0), Condition(2));
3576       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3577             kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
3578       break;
3579     }
3580     case kS390_I32x4GtU: {
3581       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3582               i.InputSimd128Register(1), Condition(0), Condition(2));
3583       break;
3584     }
3585     case kS390_I32x4GeU: {
3586       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3587               i.InputSimd128Register(1), Condition(0), Condition(2));
3588       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3589               i.InputSimd128Register(1), Condition(0), Condition(2));
3590       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3591             kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
3592       break;
3593     }
3594     case kS390_I16x8GtS: {
3595       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3596              i.InputSimd128Register(1), Condition(0), Condition(1));
3597       break;
3598     }
3599     case kS390_I16x8GeS: {
3600       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3601               i.InputSimd128Register(1), Condition(0), Condition(1));
3602       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3603              i.InputSimd128Register(1), Condition(0), Condition(1));
3604       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3605             kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
3606       break;
3607     }
3608     case kS390_I16x8GtU: {
3609       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3610               i.InputSimd128Register(1), Condition(0), Condition(1));
3611       break;
3612     }
3613     case kS390_I16x8GeU: {
3614       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3615               i.InputSimd128Register(1), Condition(0), Condition(1));
3616       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3617               i.InputSimd128Register(1), Condition(0), Condition(1));
3618       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3619             kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
3620       break;
3621     }
3622     case kS390_I8x16GtS: {
3623       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3624              i.InputSimd128Register(1), Condition(0), Condition(0));
3625       break;
3626     }
3627     case kS390_I8x16GeS: {
3628       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3629               i.InputSimd128Register(1), Condition(0), Condition(0));
3630       __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
3631              i.InputSimd128Register(1), Condition(0), Condition(0));
3632       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3633             kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
3634       break;
3635     }
3636     case kS390_I8x16GtU: {
3637       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3638               i.InputSimd128Register(1), Condition(0), Condition(0));
3639       break;
3640     }
3641     case kS390_I8x16GeU: {
3642       __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
3643               i.InputSimd128Register(1), Condition(0), Condition(0));
3644       __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
3645               i.InputSimd128Register(1), Condition(0), Condition(0));
3646       __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
3647             kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
3648       break;
3649     }
3650     // vector shifts
3651 #define VECTOR_SHIFT(op, mode)                                             \
3652   {                                                                        \
3653     __ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0),      \
3654             Condition(mode));                                              \
3655     __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0),              \
3656             Condition(mode));                                              \
3657     __ op(i.OutputSimd128Register(), i.InputSimd128Register(0),            \
3658           kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
3659   }
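    // VECTOR_SHIFT broadcasts the scalar shift amount in input register 1
    // into every lane of kScratchDoubleReg (vlvg into element 0, then vrep)
    // and applies the given per-lane variable shift; `mode` selects the
    // element size (0 = byte, 1 = halfword, 2 = word, 3 = doubleword).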
3660     case kS390_I64x2Shl: {
3661       VECTOR_SHIFT(veslv, 3);
3662       break;
3663     }
3664     case kS390_I64x2ShrS: {
3665       VECTOR_SHIFT(vesrav, 3);
3666       break;
3667     }
3668     case kS390_I64x2ShrU: {
3669       VECTOR_SHIFT(vesrlv, 3);
3670       break;
3671     }
3672     case kS390_I32x4Shl: {
3673       VECTOR_SHIFT(veslv, 2);
3674       break;
3675     }
3676     case kS390_I32x4ShrS: {
3677       VECTOR_SHIFT(vesrav, 2);
3678       break;
3679     }
3680     case kS390_I32x4ShrU: {
3681       VECTOR_SHIFT(vesrlv, 2);
3682       break;
3683     }
3684     case kS390_I16x8Shl: {
3685       VECTOR_SHIFT(veslv, 1);
3686       break;
3687     }
3688     case kS390_I16x8ShrS: {
3689       VECTOR_SHIFT(vesrav, 1);
3690       break;
3691     }
3692     case kS390_I16x8ShrU: {
3693       VECTOR_SHIFT(vesrlv, 1);
3694       break;
3695     }
3696     case kS390_I8x16Shl: {
3697       VECTOR_SHIFT(veslv, 0);
3698       break;
3699     }
3700     case kS390_I8x16ShrS: {
3701       VECTOR_SHIFT(vesrav, 0);
3702       break;
3703     }
3704     case kS390_I8x16ShrU: {
3705       VECTOR_SHIFT(vesrlv, 0);
3706       break;
3707     }
3708     // vector unary ops
3709     case kS390_F64x2Abs: {
3710       __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
3711                Condition(2), Condition(0), Condition(3));
3712       break;
3713     }
3714     case kS390_F64x2Neg: {
3715       __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
3716                Condition(0), Condition(0), Condition(3));
3717       break;
3718     }
3719     case kS390_F64x2Sqrt: {
3720       __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3721               Condition(0), Condition(0), Condition(3));
3722       break;
3723     }
3724     case kS390_F32x4Abs: {
3725       __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
3726                Condition(2), Condition(0), Condition(2));
3727       break;
3728     }
3729     case kS390_F32x4Neg: {
3730       __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
3731                Condition(0), Condition(0), Condition(2));
3732       break;
3733     }
3734     case kS390_I64x2Neg: {
3735       __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3736              Condition(0), Condition(3));
3737       break;
3738     }
3739     case kS390_I32x4Neg: {
3740       __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3741              Condition(0), Condition(2));
3742       break;
3743     }
3744     case kS390_I16x8Neg: {
3745       __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3746              Condition(0), Condition(1));
3747       break;
3748     }
3749     case kS390_I8x16Neg: {
3750       __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3751              Condition(0), Condition(0));
3752       break;
3753     }
3754     case kS390_F32x4RecipApprox: {
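      // No reciprocal-estimate instruction is used here: the integer 1 is
      // converted to 1.0f, broadcast across the vector, and divided by the
      // input to approximate the reciprocal.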
3755       __ lgfi(kScratchReg, Operand(1));
3756       __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
3757 #ifdef V8_TARGET_BIG_ENDIAN
3758       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
3759 #else
3760       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
3761 #endif
3762       __ vfd(i.OutputSimd128Register(), kScratchDoubleReg,
3763              i.InputSimd128Register(0), Condition(0), Condition(0),
3764              Condition(2));
3765       break;
3766     }
3767     case kS390_F32x4RecipSqrtApprox: {
3768       DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
3769       __ vfsq(tempFPReg1, i.InputSimd128Register(0), Condition(0), Condition(0),
3770               Condition(2));
3771       __ lgfi(kScratchReg, Operand(1));
3772       __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
3773 #ifdef V8_TARGET_BIG_ENDIAN
3774       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
3775 #else
3776       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
3777 #endif
3778       __ vfd(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
3779              Condition(0), Condition(0), Condition(2));
3780       break;
3781     }
3782     case kS390_F32x4Sqrt: {
3783       __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
3784               Condition(0), Condition(0), Condition(2));
3785       break;
3786     }
3787     case kS390_S128Not: {
3788       Simd128Register src = i.InputSimd128Register(0);
3789       Simd128Register dst = i.OutputSimd128Register();
3790       __ vno(dst, src, src, Condition(0), Condition(0), Condition(0));
3791       break;
3792     }
3793     case kS390_I8x16Abs: {
3794       __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3795              Condition(0), Condition(0));
3796       break;
3797     }
3798     case kS390_I16x8Abs: {
3799       __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3800              Condition(0), Condition(1));
3801       break;
3802     }
3803     case kS390_I32x4Abs: {
3804       __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
3805              Condition(0), Condition(2));
3806       break;
3807     }
3808     // vector boolean unops
3809     case kS390_V32x4AnyTrue:
3810     case kS390_V16x8AnyTrue:
3811     case kS390_V8x16AnyTrue: {
3812       Simd128Register src = i.InputSimd128Register(0);
3813       Register dst = i.OutputRegister();
3814       Register temp = i.TempRegister(0);
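      // dst starts at 1; vtm sets condition code 0 when every bit of src is
      // zero, and locgr then overwrites dst with temp (0), so AnyTrue is 0
      // only for an all-zero vector.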
3815       __ lgfi(dst, Operand(1));
3816       __ xgr(temp, temp);
3817       __ vtm(src, src, Condition(0), Condition(0), Condition(0));
3818       __ locgr(Condition(8), dst, temp);
3819       break;
3820     }
3821 #define SIMD_ALL_TRUE(mode)                                                    \
3822   Simd128Register src = i.InputSimd128Register(0);                             \
3823   Register dst = i.OutputRegister();                                           \
3824   Register temp = i.TempRegister(0);                                           \
3825   __ lgfi(temp, Operand(1));                                                   \
3826   __ xgr(dst, dst);                                                            \
3827   __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
3828         Condition(0), Condition(2));                                           \
3829   __ vceq(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),             \
3830           Condition(mode));                                                    \
3831   __ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0),     \
3832          Condition(0));                                                        \
3833   __ locgr(Condition(8), dst, temp);
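    // SIMD_ALL_TRUE compares src against a zeroed vector: the vceq mask is
    // non-zero iff some lane equals zero, so vtm sets condition code 0 (and
    // locgr stores 1 into dst) only when every lane of src is non-zero.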
3834     case kS390_V32x4AllTrue: {
3835       SIMD_ALL_TRUE(2)
3836       break;
3837     }
3838     case kS390_V16x8AllTrue: {
3839       SIMD_ALL_TRUE(1)
3840       break;
3841     }
3842     case kS390_V8x16AllTrue: {
3843       SIMD_ALL_TRUE(0)
3844       break;
3845     }
3846 #undef SIMD_ALL_TRUE
3847     // vector bitwise ops
3848     case kS390_S128And: {
3849       Simd128Register dst = i.OutputSimd128Register();
3850       Simd128Register src = i.InputSimd128Register(1);
3851       __ vn(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
3852             Condition(0));
3853       break;
3854     }
3855     case kS390_S128Or: {
3856       Simd128Register dst = i.OutputSimd128Register();
3857       Simd128Register src = i.InputSimd128Register(1);
3858       __ vo(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
3859             Condition(0));
3860       break;
3861     }
3862     case kS390_S128Xor: {
3863       Simd128Register dst = i.OutputSimd128Register();
3864       Simd128Register src = i.InputSimd128Register(1);
3865       __ vx(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
3866             Condition(0));
3867       break;
3868     }
3869     case kS390_S128Const: {
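      // Assemble the 128-bit constant from four 32-bit immediates: each pair
      // is combined into a 64-bit half in r0/ip (order depends on endianness)
      // and vlvgp packs the two halves into the output vector.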
3870 #ifdef V8_TARGET_BIG_ENDIAN
3871       for (int index = 0, j = 0; index < 2; index++, j += 2) {
3872         __ lgfi(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
3873         __ iihf(index < 1 ? ip : r0, Operand(i.InputInt32(j + 1)));
3874       }
3875 #else
3876       for (int index = 0, j = 0; index < 2; index++, j += 2) {
3877         __ lgfi(index < 1 ? r0 : ip, Operand(i.InputInt32(j)));
3878         __ iihf(index < 1 ? r0 : ip, Operand(i.InputInt32(j + 1)));
3879       }
3880 #endif
3881       __ vlvgp(i.OutputSimd128Register(), r0, ip);
3882       break;
3883     }
3884     case kS390_S128Zero: {
3885       Simd128Register dst = i.OutputSimd128Register();
3886       __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
3887       break;
3888     }
3889     case kS390_S128AllOnes: {
3890       Simd128Register dst = i.OutputSimd128Register();
3891       __ vceq(dst, dst, dst, Condition(0), Condition(3));
3892       break;
3893     }
3894     case kS390_S128Select: {
3895       Simd128Register dst = i.OutputSimd128Register();
3896       Simd128Register mask = i.InputSimd128Register(0);
3897       Simd128Register src1 = i.InputSimd128Register(1);
3898       Simd128Register src2 = i.InputSimd128Register(2);
3899       __ vsel(dst, src1, src2, mask, Condition(0), Condition(0));
3900       break;
3901     }
3902     case kS390_S128AndNot: {
3903       Simd128Register dst = i.OutputSimd128Register();
3904       Simd128Register src = i.InputSimd128Register(1);
3905       __ vnc(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
3906              Condition(0));
3907       break;
3908     }
3909     // vector conversions
3910 #define CONVERT_FLOAT_TO_INT32(convert)                             \
3911   for (int index = 0; index < 4; index++) {                         \
3912     __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index),  \
3913             Condition(2));                                          \
3914     __ MovIntToFloat(tempFPReg1, kScratchReg);                      \
3915     __ convert(kScratchReg, tempFPReg1, kRoundToZero);              \
3916     __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
3917   }
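    // CONVERT_FLOAT_TO_INT32 converts the prepared value in kScratchDoubleReg
    // one 32-bit lane at a time: extract the lane, reinterpret it as a float,
    // convert with the given routine (round-to-zero), and insert into dst.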
3918     case kS390_I32x4SConvertF32x4: {
3919       Simd128Register src = i.InputSimd128Register(0);
3920       Simd128Register dst = i.OutputSimd128Register();
3921       Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
3922       // NaN to 0
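      // Comparing src with itself yields an all-ones lane mask for ordinary
      // values and zero for NaN lanes; ANDing the mask back into src clears
      // the NaN lanes before the lane-wise conversion below.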
3923       __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
3924       __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
3925               Condition(0), Condition(0), Condition(2));
3926       __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
3927             Condition(0), Condition(0));
3928       CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
3929       break;
3930     }
3931     case kS390_I32x4UConvertF32x4: {
3932       Simd128Register src = i.InputSimd128Register(0);
3933       Simd128Register dst = i.OutputSimd128Register();
3934       Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
3935       // NaN to 0, negative to 0
3936       __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
3937             Condition(0), Condition(0), Condition(0));
3938       __ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
3939                Condition(0), Condition(2));
3940       CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
3941       break;
3942     }
3943 #undef CONVERT_FLOAT_TO_INT32
3944 #define CONVERT_INT32_TO_FLOAT(convert, double_index)               \
3945   Simd128Register src = i.InputSimd128Register(0);                  \
3946   Simd128Register dst = i.OutputSimd128Register();                  \
3947   for (int index = 0; index < 4; index++) {                         \
3948     __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
3949     __ convert(kScratchDoubleReg, kScratchReg);                     \
3950     __ MovFloatToInt(kScratchReg, kScratchDoubleReg);               \
3951     __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
3952   }
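    // CONVERT_INT32_TO_FLOAT likewise converts lane by lane through a GPR:
    // each 32-bit lane is extracted, converted with the given routine, and
    // the resulting float bit pattern is inserted back into dst. Note that
    // the double_index argument is not referenced by the macro body.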
3953     case kS390_F32x4SConvertI32x4: {
3954 #ifdef V8_TARGET_BIG_ENDIAN
3955       CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
3956 #else
3957       CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 1)
3958 #endif
3959       break;
3960     }
3961     case kS390_F32x4UConvertI32x4: {
3962 #ifdef V8_TARGET_BIG_ENDIAN
3963       CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
3964 #else
3965       CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 1)
3966 #endif
3967       break;
3968     }
3969 #undef CONVERT_INT32_TO_FLOAT
3970 #define VECTOR_UNPACK(op, mode)                                             \
3971   __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
3972         Condition(0), Condition(mode));
3973     case kS390_I32x4SConvertI16x8Low: {
3974       VECTOR_UNPACK(vupl, 1)
3975       break;
3976     }
3977     case kS390_I32x4SConvertI16x8High: {
3978       VECTOR_UNPACK(vuph, 1)
3979       break;
3980     }
3981     case kS390_I32x4UConvertI16x8Low: {
3982       VECTOR_UNPACK(vupll, 1)
3983       break;
3984     }
3985     case kS390_I32x4UConvertI16x8High: {
3986       VECTOR_UNPACK(vuplh, 1)
3987       break;
3988     }
3989     case kS390_I16x8SConvertI8x16Low: {
3990       VECTOR_UNPACK(vupl, 0)
3991       break;
3992     }
3993     case kS390_I16x8SConvertI8x16High: {
3994       VECTOR_UNPACK(vuph, 0)
3995       break;
3996     }
3997     case kS390_I16x8UConvertI8x16Low: {
3998       VECTOR_UNPACK(vupll, 0)
3999       break;
4000     }
4001     case kS390_I16x8UConvertI8x16High: {
4002       VECTOR_UNPACK(vuplh, 0)
4003       break;
4004     }
4005 #undef VECTOR_UNPACK
4006     case kS390_I16x8SConvertI32x4:
4007 #ifdef V8_TARGET_BIG_ENDIAN
4008       __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
4009               i.InputSimd128Register(0), Condition(0), Condition(2));
4010 #else
4011       __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
4012               i.InputSimd128Register(1), Condition(0), Condition(2));
4013 #endif
4014       break;
4015     case kS390_I8x16SConvertI16x8:
4016 #ifdef V8_TARGET_BIG_ENDIAN
4017       __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
4018               i.InputSimd128Register(0), Condition(0), Condition(1));
4019 #else
4020       __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
4021               i.InputSimd128Register(1), Condition(0), Condition(1));
4022 #endif
4023       break;
4024 #define VECTOR_PACK_UNSIGNED(mode)                                             \
4025   Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0));           \
4026   __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
4027         Condition(0), Condition(mode));                                        \
4028   __ vmx(tempFPReg, i.InputSimd128Register(0), kScratchDoubleReg,              \
4029          Condition(0), Condition(0), Condition(mode));                         \
4030   __ vmx(kScratchDoubleReg, i.InputSimd128Register(1), kScratchDoubleReg,      \
4031          Condition(0), Condition(0), Condition(mode));
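    // VECTOR_PACK_UNSIGNED clamps both (signed) inputs to >= 0 by taking a
    // signed max against a zeroed vector, so that the unsigned-saturating
    // pack (vpkls) in the cases below produces the expected result.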
4032     case kS390_I16x8UConvertI32x4: {
4033       // treat inputs as signed, and saturate to unsigned (negative to 0)
4034       VECTOR_PACK_UNSIGNED(2)
4035 #ifdef V8_TARGET_BIG_ENDIAN
4036       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
4037                Condition(0), Condition(2));
4038 #else
4039       __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
4040                Condition(0), Condition(2));
4041 #endif
4042       break;
4043     }
4044     case kS390_I8x16UConvertI16x8: {
4045       // treat inputs as signed, and saturate to unsigned (negative to 0)
4046       VECTOR_PACK_UNSIGNED(1)
4047 #ifdef V8_TARGET_BIG_ENDIAN
4048       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
4049                Condition(0), Condition(1));
4050 #else
4051       __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
4052                Condition(0), Condition(1));
4053 #endif
4054       break;
4055     }
4056 #undef VECTOR_PACK_UNSIGNED
4057 #define BINOP_EXTRACT(op, extract_high, extract_low, mode)              \
4058   Simd128Register src1 = i.InputSimd128Register(0);                     \
4059   Simd128Register src2 = i.InputSimd128Register(1);                     \
4060   Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));   \
4061   Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));   \
4062   __ extract_high(kScratchDoubleReg, src1, Condition(0), Condition(0),  \
4063                   Condition(mode));                                     \
4064   __ extract_high(tempFPReg1, src2, Condition(0), Condition(0),         \
4065                   Condition(mode));                                     \
4066   __ op(kScratchDoubleReg, kScratchDoubleReg, tempFPReg1, Condition(0), \
4067         Condition(0), Condition(mode + 1));                             \
4068   __ extract_low(tempFPReg1, src1, Condition(0), Condition(0),          \
4069                  Condition(mode));                                      \
4070   __ extract_low(tempFPReg2, src2, Condition(0), Condition(0),          \
4071                  Condition(mode));                                      \
4072   __ op(tempFPReg1, tempFPReg1, tempFPReg2, Condition(0), Condition(0), \
4073         Condition(mode + 1));
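    // BINOP_EXTRACT widens both operands to the next element size, applies
    // the operation separately to the high halves (result in kScratchDoubleReg)
    // and the low halves (result in tempFPReg1), leaving the two wide halves
    // to be re-packed with saturation by the individual cases below.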
4074     case kS390_I16x8AddSatS: {
4075       BINOP_EXTRACT(va, vuph, vupl, 1)
4076 #ifdef V8_TARGET_BIG_ENDIAN
4077       __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4078               Condition(0), Condition(2));
4079 #else
4080       __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4081               Condition(0), Condition(2));
4082 #endif
4083       break;
4084     }
4085     case kS390_I16x8SubSatS: {
4086       BINOP_EXTRACT(vs, vuph, vupl, 1)
4087 #ifdef V8_TARGET_BIG_ENDIAN
4088       __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4089               Condition(0), Condition(2));
4090 #else
4091       __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4092               Condition(0), Condition(2));
4093 #endif
4094       break;
4095     }
4096     case kS390_I16x8AddSatU: {
4097       BINOP_EXTRACT(va, vuplh, vupll, 1)
4098 #ifdef V8_TARGET_BIG_ENDIAN
4099       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4100                Condition(0), Condition(2));
4101 #else
4102       __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4103                Condition(0), Condition(2));
4104 #endif
4105       break;
4106     }
4107     case kS390_I16x8SubSatU: {
4108       BINOP_EXTRACT(vs, vuplh, vupll, 1)
4109       // negative to 0
4110       __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
4111             Condition(0));
4112       __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
4113              Condition(0), Condition(2));
4114       __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
4115              Condition(2));
4116 #ifdef V8_TARGET_BIG_ENDIAN
4117       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4118                Condition(0), Condition(2));
4119 #else
4120       __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4121                Condition(0), Condition(2));
4122 #endif
4123       break;
4124     }
4125     case kS390_I8x16AddSatS: {
4126       BINOP_EXTRACT(va, vuph, vupl, 0)
4127 #ifdef V8_TARGET_BIG_ENDIAN
4128       __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4129               Condition(0), Condition(1));
4130 #else
4131       __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4132               Condition(0), Condition(1));
4133 #endif
4134       break;
4135     }
4136     case kS390_I8x16SubSatS: {
4137       BINOP_EXTRACT(vs, vuph, vupl, 0)
4138 #ifdef V8_TARGET_BIG_ENDIAN
4139       __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4140               Condition(0), Condition(1));
4141 #else
4142       __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4143               Condition(0), Condition(1));
4144 #endif
4145       break;
4146     }
4147     case kS390_I8x16AddSatU: {
4148       BINOP_EXTRACT(va, vuplh, vupll, 0)
4149 #ifdef V8_TARGET_BIG_ENDIAN
4150       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4151                Condition(0), Condition(1));
4152 #else
4153       __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4154                Condition(0), Condition(1));
4155 #endif
4156       break;
4157     }
4158     case kS390_I8x16SubSatU: {
4159       BINOP_EXTRACT(vs, vuplh, vupll, 0)
4160       // negative to 0
4161       __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
4162             Condition(0));
4163       __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
4164              Condition(0), Condition(1));
4165       __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
4166              Condition(1));
4167 #ifdef V8_TARGET_BIG_ENDIAN
4168       __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4169                Condition(0), Condition(1));
4170 #else
4171       __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
4172                Condition(0), Condition(1));
4173 
4174 #endif
4175       break;
4176     }
4177 #undef BINOP_EXTRACT
4178     case kS390_I8x16Shuffle: {
4179       Simd128Register dst = i.OutputSimd128Register(),
4180                       src0 = i.InputSimd128Register(0),
4181                       src1 = i.InputSimd128Register(1);
4182       int32_t k8x16_indices[] = {i.InputInt32(2), i.InputInt32(3),
4183                                  i.InputInt32(4), i.InputInt32(5)};
4184       // create 2 * 8 byte inputs indicating new indices
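      // The four 32-bit immediates together hold the 16 shuffle byte indices;
      // they are packed into r0/ip (order depends on endianness) and combined
      // into a single vperm control vector by vlvgp below.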
4185       for (int i = 0, j = 0; i < 2; i++, j += 2) {
4186 #ifdef V8_TARGET_BIG_ENDIAN
4187         __ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
4188         __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
4189 #else
4190         __ lgfi(i < 1 ? r0 : ip, Operand(k8x16_indices[j]));
4191         __ iihf(i < 1 ? r0 : ip, Operand(k8x16_indices[j + 1]));
4192 #endif
4193       }
4194       __ vlvgp(kScratchDoubleReg, r0, ip);
4195       __ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
4196       break;
4197     }
4198     case kS390_I8x16Swizzle: {
4199       Simd128Register dst = i.OutputSimd128Register(),
4200                       src0 = i.InputSimd128Register(0),
4201                       src1 = i.InputSimd128Register(1);
4202       Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
4203       // Clamp the indices to 5 bits (unsigned min with 31); out-of-range
4204       // indices then select from the zeroed vperm operand and return 0.
4205       __ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
4206       __ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
4207               Condition(0));
4208 #ifdef V8_TARGET_BIG_ENDIAN
4209       // Input needs to be reversed.
4210       __ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
4211       __ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
4212       __ lrvgr(r0, r0);
4213       __ lrvgr(r1, r1);
4214       __ vlvgp(dst, r1, r0);
4215       // clear scratch
4216       __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
4217             Condition(0), Condition(0), Condition(0));
4218       __ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
4219                Condition(0));
4220 #else
4221       __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
4222             Condition(0), Condition(0), Condition(0));
4223       __ vperm(dst, src0, kScratchDoubleReg, tempFPReg1, Condition(0),
4224                Condition(0));
4225 #endif
4226       break;
4227     }
4228     case kS390_I32x4BitMask: {
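      // The 64-bit constant below is a vbperm bit selector: offsets 0x00,
      // 0x20, 0x40 and 0x60 pick the most-significant (sign) bit of each word
      // element, while entries with 0x80 set select a constant zero bit; the
      // two endianness variants only reorder the selector bytes.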
4229 #ifdef V8_TARGET_BIG_ENDIAN
4230       __ lgfi(kScratchReg, Operand(0x204060));
4231       __ iihf(kScratchReg, Operand(0x80808080));  // Zeroing the high bits.
4232 #else
4233       __ lgfi(kScratchReg, Operand(0x80808080));
4234       __ iihf(kScratchReg, Operand(0x60402000));
4235 #endif
4236       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
4237       __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
4238                 Condition(0), Condition(0), Condition(0));
4239       __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
4240               Condition(0));
4241       break;
4242     }
4243     case kS390_I16x8BitMask: {
4244 #ifdef V8_TARGET_BIG_ENDIAN
4245       __ lgfi(kScratchReg, Operand(0x40506070));
4246       __ iihf(kScratchReg, Operand(0x102030));
4247 #else
4248       __ lgfi(kScratchReg, Operand(0x30201000));
4249       __ iihf(kScratchReg, Operand(0x70605040));
4250 #endif
4251       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
4252       __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
4253                 Condition(0), Condition(0), Condition(0));
4254       __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
4255               Condition(0));
4256       break;
4257     }
4258     case kS390_I8x16BitMask: {
4259 #ifdef V8_TARGET_BIG_ENDIAN
4260       __ lgfi(r0, Operand(0x60687078));
4261       __ iihf(r0, Operand(0x40485058));
4262       __ lgfi(ip, Operand(0x20283038));
4263       __ iihf(ip, Operand(0x81018));
4264 #else
4265       __ lgfi(ip, Operand(0x58504840));
4266       __ iihf(ip, Operand(0x78706860));
4267       __ lgfi(r0, Operand(0x18100800));
4268       __ iihf(r0, Operand(0x38302820));
4269 #endif
4270       __ vlvgp(kScratchDoubleReg, ip, r0);
4271       __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
4272                 Condition(0), Condition(0), Condition(0));
4273       __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 3),
4274               Condition(1));
4275       break;
4276     }
4277     case kS390_F32x4Pmin: {
4278       __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
4279                i.InputSimd128Register(1), Condition(3), Condition(0),
4280                Condition(2));
4281       break;
4282     }
4283     case kS390_F32x4Pmax: {
4284       __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
4285                i.InputSimd128Register(1), Condition(3), Condition(0),
4286                Condition(2));
4287       break;
4288     }
4289     case kS390_F64x2Pmin: {
4290       __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
4291                i.InputSimd128Register(1), Condition(3), Condition(0),
4292                Condition(3));
4293       break;
4294     }
4295     case kS390_F64x2Pmax: {
4296       __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
4297                i.InputSimd128Register(1), Condition(3), Condition(0),
4298                Condition(3));
4299       break;
4300     }
4301     case kS390_F64x2Ceil: {
4302       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
4303              Condition(0), Condition(3));
4304       break;
4305     }
4306     case kS390_F64x2Floor: {
4307       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
4308              Condition(0), Condition(3));
4309       break;
4310     }
4311     case kS390_F64x2Trunc: {
4312       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
4313              Condition(0), Condition(3));
4314       break;
4315     }
4316     case kS390_F64x2NearestInt: {
4317       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
4318              Condition(0), Condition(3));
4319       break;
4320     }
4321     case kS390_F32x4Ceil: {
4322       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
4323              Condition(0), Condition(2));
4324       break;
4325     }
4326     case kS390_F32x4Floor: {
4327       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
4328              Condition(0), Condition(2));
4329       break;
4330     }
4331     case kS390_F32x4Trunc: {
4332       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
4333              Condition(0), Condition(2));
4334       break;
4335     }
4336     case kS390_F32x4NearestInt: {
4337       __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
4338              Condition(0), Condition(2));
4339       break;
4340     }
4341     case kS390_I32x4DotI16x8S: {
4342       Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
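      // vme/vmo multiply the even and odd 16-bit lanes into 32-bit products;
      // adding the two gives the pairwise i16x8 -> i32x4 dot product.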
4343       __ vme(kScratchDoubleReg, i.InputSimd128Register(0),
4344              i.InputSimd128Register(1), Condition(0), Condition(0),
4345              Condition(1));
4346       __ vmo(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(1),
4347              Condition(0), Condition(0), Condition(1));
4348       __ va(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
4349             Condition(0), Condition(0), Condition(2));
4350       break;
4351     }
4352     case kS390_StoreCompressTagged: {
4353       CHECK(!instr->HasOutput());
4354       size_t index = 0;
4355       AddressingMode mode = kMode_None;
4356       MemOperand operand = i.MemoryOperand(&mode, &index);
4357       Register value = i.InputRegister(index);
4358       __ StoreTaggedField(value, operand, r1);
4359       break;
4360     }
4361     case kS390_LoadDecompressTaggedSigned: {
4362       CHECK(instr->HasOutput());
4363       __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
4364       break;
4365     }
4366     case kS390_LoadDecompressTaggedPointer: {
4367       CHECK(instr->HasOutput());
4368       __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
4369       break;
4370     }
4371     case kS390_LoadDecompressAnyTagged: {
4372       CHECK(instr->HasOutput());
4373       __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
4374       break;
4375     }
4376     default:
4377       UNREACHABLE();
4378   }
4379   return kSuccess;
4380 }  // NOLINT(readability/fn_size)
4381 
4382 // Assembles branches after an instruction.
4383 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
4384   S390OperandConverter i(this, instr);
4385   Label* tlabel = branch->true_label;
4386   Label* flabel = branch->false_label;
4387   ArchOpcode op = instr->arch_opcode();
4388   FlagsCondition condition = branch->condition;
4389 
4390   Condition cond = FlagsConditionToCondition(condition, op);
4391   if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
4392     // check for unordered if necessary
4393     // Branching to flabel/tlabel according to what's expected by tests
4394     if (cond == le || cond == eq || cond == lt) {
4395       __ bunordered(flabel);
4396     } else if (cond == gt || cond == ne || cond == ge) {
4397       __ bunordered(tlabel);
4398     }
4399   }
4400   __ b(cond, tlabel);
4401   if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
4402 }
4403 
4404 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
4405                                             Instruction* instr) {
4406   // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
4407   if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
4408       condition == kOverflow || condition == kNotOverflow) {
4409     return;
4410   }
4411 
4412   condition = NegateFlagsCondition(condition);
4413   __ LoadImmP(r0, Operand::Zero());
4414   __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
4415                       kSpeculationPoisonRegister, r0);
4416 }
4417 
4418 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
4419                                             BranchInfo* branch) {
4420   AssembleArchBranch(instr, branch);
4421 }
4422 
4423 void CodeGenerator::AssembleArchJump(RpoNumber target) {
4424   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
4425 }
4426 
4427 void CodeGenerator::AssembleArchTrap(Instruction* instr,
4428                                      FlagsCondition condition) {
4429   class OutOfLineTrap final : public OutOfLineCode {
4430    public:
4431     OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
4432         : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
4433 
4434     void Generate() final {
4435       S390OperandConverter i(gen_, instr_);
4436       TrapId trap_id =
4437           static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
4438       GenerateCallToTrap(trap_id);
4439     }
4440 
4441    private:
4442     void GenerateCallToTrap(TrapId trap_id) {
4443       if (trap_id == TrapId::kInvalid) {
4444         // We cannot test calls to the runtime in cctest/test-run-wasm.
4445         // Therefore we emit a call to C here instead of a call to the runtime.
4446         // We use the context register as the scratch register, because we do
4447         // not have a context here.
4448         __ PrepareCallCFunction(0, 0, cp);
4449         __ CallCFunction(
4450             ExternalReference::wasm_call_trap_callback_for_testing(), 0);
4451         __ LeaveFrame(StackFrame::WASM);
4452         auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
4453         int pop_count =
4454             static_cast<int>(call_descriptor->StackParameterCount());
4455         __ Drop(pop_count);
4456         __ Ret();
4457       } else {
4458         gen_->AssembleSourcePosition(instr_);
4459         // A direct call to a wasm runtime stub defined in this module.
4460         // Just encode the stub index. This will be patched when the code
4461         // is added to the native module and copied into wasm code space.
4462         __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
4463         ReferenceMap* reference_map =
4464             gen_->zone()->New<ReferenceMap>(gen_->zone());
4465         gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
4466         if (FLAG_debug_code) {
4467           __ stop();
4468         }
4469       }
4470     }
4471 
4472     Instruction* instr_;
4473     CodeGenerator* gen_;
4474   };
4475   auto ool = zone()->New<OutOfLineTrap>(this, instr);
4476   Label* tlabel = ool->entry();
4477   Label end;
4478 
4479   ArchOpcode op = instr->arch_opcode();
4480   Condition cond = FlagsConditionToCondition(condition, op);
4481   if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
4482     // check for unordered if necessary
4483     if (cond == le || cond == eq || cond == lt) {
4484       __ bunordered(&end);
4485     } else if (cond == gt || cond == ne || cond == ge) {
4486       __ bunordered(tlabel);
4487     }
4488   }
4489   __ b(cond, tlabel);
4490   __ bind(&end);
4491 }
4492 
4493 // Assembles boolean materializations after an instruction.
4494 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
4495                                         FlagsCondition condition) {
4496   S390OperandConverter i(this, instr);
4497   ArchOpcode op = instr->arch_opcode();
4498   bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
4499 
4500   // Overflow checked for add/sub only.
4501   DCHECK((condition != kOverflow && condition != kNotOverflow) ||
4502          (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
4503           op == kS390_Sub64 || op == kS390_Mul32));
4504 
4505   // Materialize a full 32-bit 1 or 0 value. The result register is always the
4506   // last output of the instruction.
4507   DCHECK_NE(0u, instr->OutputCount());
4508   Register reg = i.OutputRegister(instr->OutputCount() - 1);
4509   Condition cond = FlagsConditionToCondition(condition, op);
4510   Label done;
4511   if (check_unordered) {
4512     __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
4513                                                               : Operand(1));
4514     __ bunordered(&done);
4515   }
4516 
4517   // TODO(john.yan): use load imm high on condition here
4518   __ LoadImmP(reg, Operand::Zero());
4519   __ LoadImmP(kScratchReg, Operand(1));
4520   // locr is sufficient since reg's upper 32 bits are guaranteed to be 0
4521   __ locr(cond, reg, kScratchReg);
4522   __ bind(&done);
4523 }
4524 
4525 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
4526   S390OperandConverter i(this, instr);
4527   Register input = i.InputRegister(0);
4528   std::vector<std::pair<int32_t, Label*>> cases;
4529   for (size_t index = 2; index < instr->InputCount(); index += 2) {
4530     cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
4531   }
4532   AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
4533                                       cases.data() + cases.size());
4534 }
4535 
4536 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
4537   S390OperandConverter i(this, instr);
4538   Register input = i.InputRegister(0);
4539   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
4540   Label** cases = zone()->NewArray<Label*>(case_count);
4541   for (int32_t index = 0; index < case_count; ++index) {
4542     cases[index] = GetLabel(i.InputRpo(index + 2));
4543   }
4544   Label* const table = AddJumpTable(cases, case_count);
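  // Bounds-check the switch input against the number of cases (branching to
  // the default block, input 1, when out of range), then load the target
  // label address from the jump table and branch to it.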
4545   __ CmpLogicalP(input, Operand(case_count));
4546   __ bge(GetLabel(i.InputRpo(1)));
4547   __ larl(kScratchReg, table);
4548   __ ShiftLeftP(r1, input, Operand(kSystemPointerSizeLog2));
4549   __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
4550   __ Jump(kScratchReg);
4551 }
4552 
4553 void CodeGenerator::FinishFrame(Frame* frame) {
4554   auto call_descriptor = linkage()->GetIncomingDescriptor();
4555   const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
4556 
4557   // Save callee-saved Double registers.
4558   if (double_saves != 0) {
4559     frame->AlignSavedCalleeRegisterSlots();
4560     DCHECK_EQ(kNumCalleeSavedDoubles,
4561               base::bits::CountPopulation(double_saves));
4562     frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
4563                                             (kDoubleSize / kSystemPointerSize));
4564   }
4565   // Save callee-saved registers.
4566   const RegList saves = call_descriptor->CalleeSavedRegisters();
4567   if (saves != 0) {
4568     // register save area does not include the fp or constant pool pointer.
4569     const int num_saves = kNumCalleeSaved - 1;
4570     DCHECK(num_saves == base::bits::CountPopulation(saves));
4571     frame->AllocateSavedCalleeRegisterSlots(num_saves);
4572   }
4573 }
4574 
4575 void CodeGenerator::AssembleConstructFrame() {
4576   auto call_descriptor = linkage()->GetIncomingDescriptor();
4577 
4578   if (frame_access_state()->has_frame()) {
4579     if (call_descriptor->IsCFunctionCall()) {
4580       if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
4581         __ StubPrologue(StackFrame::C_WASM_ENTRY);
4582         // Reserve stack space for saving the c_entry_fp later.
4583         __ lay(sp, MemOperand(sp, -kSystemPointerSize));
4584       } else {
4585         __ Push(r14, fp);
4586         __ LoadRR(fp, sp);
4587       }
4588     } else if (call_descriptor->IsJSFunctionCall()) {
4589       __ Prologue(ip);
4590     } else {
4591       StackFrame::Type type = info()->GetOutputStackFrameType();
4592       // TODO(mbrandy): Detect cases where ip is the entrypoint (for
4593       // efficient initialization of the constant pool pointer register).
4594       __ StubPrologue(type);
4595       if (call_descriptor->IsWasmFunctionCall()) {
4596         __ Push(kWasmInstanceRegister);
4597       } else if (call_descriptor->IsWasmImportWrapper() ||
4598                  call_descriptor->IsWasmCapiFunction()) {
4599         // Wasm import wrappers are passed a tuple in the place of the instance.
4600         // Unpack the tuple into the instance and the target callable.
4601         // This must be done here in the codegen because it cannot be expressed
4602         // properly in the graph.
4603         __ LoadTaggedPointerField(
4604             kJSFunctionRegister,
4605             FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset), r0);
4606         __ LoadTaggedPointerField(
4607             kWasmInstanceRegister,
4608             FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset), r0);
4609         __ Push(kWasmInstanceRegister);
4610         if (call_descriptor->IsWasmCapiFunction()) {
4611           // Reserve space for saving the PC later.
4612           __ lay(sp, MemOperand(sp, -kSystemPointerSize));
4613         }
4614       }
4615     }
4616     unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
4617   }
4618 
4619   int required_slots =
4620       frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
4621   if (info()->is_osr()) {
4622     // TurboFan OSR-compiled functions cannot be entered directly.
4623     __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
4624 
4625     // Unoptimized code jumps directly to this entrypoint while the unoptimized
4626     // frame is still on the stack. Optimized code uses OSR values directly from
4627     // the unoptimized frame. Thus, all that needs to be done is to allocate the
4628     // remaining stack slots.
4629     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
4630     osr_pc_offset_ = __ pc_offset();
4631     required_slots -= osr_helper()->UnoptimizedFrameSlots();
4632     ResetSpeculationPoison();
4633   }
4634 
4635   const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
4636   const RegList saves = call_descriptor->CalleeSavedRegisters();
4637 
4638   if (required_slots > 0) {
4639     if (info()->IsWasm() && required_slots > 128) {
4640       // For WebAssembly functions with big frames we have to do the stack
4641       // overflow check before we construct the frame. Otherwise we may not
4642       // have enough space on the stack to call the runtime for the stack
4643       // overflow.
4644       Label done;
4645 
4646       // If the frame is bigger than the stack, we throw the stack overflow
4647       // exception unconditionally. Thereby we can avoid the integer overflow
4648       // check in the condition code.
4649       if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
4650         Register scratch = r1;
4651         __ LoadP(
4652             scratch,
4653             FieldMemOperand(kWasmInstanceRegister,
4654                             WasmInstanceObject::kRealStackLimitAddressOffset));
4655         __ LoadP(scratch, MemOperand(scratch));
4656         __ AddP(scratch, scratch, Operand(required_slots * kSystemPointerSize));
4657         __ CmpLogicalP(sp, scratch);
4658         __ bge(&done);
4659       }
4660 
4661       __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
4662       // We come from WebAssembly, there are no references for the GC.
4663       ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
4664       RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
4665       if (FLAG_debug_code) {
4666         __ stop();
4667       }
4668 
4669       __ bind(&done);
4670     }
4671 
4672     // Skip callee-saved and return slots, which are pushed below.
4673     required_slots -= base::bits::CountPopulation(saves);
4674     required_slots -= frame()->GetReturnSlotCount();
4675     required_slots -= (kDoubleSize / kSystemPointerSize) *
4676                       base::bits::CountPopulation(saves_fp);
4677     __ lay(sp, MemOperand(sp, -required_slots * kSystemPointerSize));
4678   }
4679 
4680   // Save callee-saved Double registers.
4681   if (saves_fp != 0) {
4682     __ MultiPushDoubles(saves_fp);
4683     DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
4684   }
4685 
4686   // Save callee-saved registers.
4687   if (saves != 0) {
4688     __ MultiPush(saves);
4689     // register save area does not include the fp or constant pool pointer.
4690   }
4691 
4692   const int returns = frame()->GetReturnSlotCount();
4693   if (returns != 0) {
4694     // Create space for returns.
4695     __ lay(sp, MemOperand(sp, -returns * kSystemPointerSize));
4696   }
4697 }
4698 
4699 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
4700   auto call_descriptor = linkage()->GetIncomingDescriptor();
4701   int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
4702 
4703   const int returns = frame()->GetReturnSlotCount();
4704   if (returns != 0) {
4705     // Create space for returns.
4706     __ lay(sp, MemOperand(sp, returns * kSystemPointerSize));
4707   }
4708 
4709   // Restore registers.
4710   const RegList saves = call_descriptor->CalleeSavedRegisters();
4711   if (saves != 0) {
4712     __ MultiPop(saves);
4713   }
4714 
4715   // Restore double registers.
4716   const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
4717   if (double_saves != 0) {
4718     __ MultiPopDoubles(double_saves);
4719   }
4720 
4721   unwinding_info_writer_.MarkBlockWillExit();
4722 
4723   S390OperandConverter g(this, nullptr);
4724   if (call_descriptor->IsCFunctionCall()) {
4725     AssembleDeconstructFrame();
4726   } else if (frame_access_state()->has_frame()) {
4727     // Canonicalize JSFunction return sites for now unless they have a
4728     // variable number of stack slot pops.
4729     if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
4730       if (return_label_.is_bound()) {
4731         __ b(&return_label_);
4732         return;
4733       } else {
4734         __ bind(&return_label_);
4735         AssembleDeconstructFrame();
4736       }
4737     } else {
4738       AssembleDeconstructFrame();
4739     }
4740   }
4741   if (pop->IsImmediate()) {
4742     pop_count += g.ToConstant(pop).ToInt32();
4743   } else {
4744     __ Drop(g.ToRegister(pop));
4745   }
4746   __ Drop(pop_count);
4747   __ Ret();
4748 }
4749 
4750 void CodeGenerator::FinishCode() {}
4751 
4752 void CodeGenerator::PrepareForDeoptimizationExits(
4753     ZoneDeque<DeoptimizationExit*>* exits) {}
4754 
4755 void CodeGenerator::AssembleMove(InstructionOperand* source,
4756                                  InstructionOperand* destination) {
4757   S390OperandConverter g(this, nullptr);
4758   // Dispatch on the source and destination operand kinds.  Not all
4759   // combinations are possible.
4760   if (source->IsRegister()) {
4761     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4762     Register src = g.ToRegister(source);
4763     if (destination->IsRegister()) {
4764       __ Move(g.ToRegister(destination), src);
4765     } else {
4766       __ StoreP(src, g.ToMemOperand(destination));
4767     }
4768   } else if (source->IsStackSlot()) {
4769     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4770     MemOperand src = g.ToMemOperand(source);
4771     if (destination->IsRegister()) {
4772       __ LoadP(g.ToRegister(destination), src);
4773     } else {
4774       Register temp = kScratchReg;
4775       __ LoadP(temp, src, r0);
4776       __ StoreP(temp, g.ToMemOperand(destination));
4777     }
4778   } else if (source->IsConstant()) {
4779     Constant src = g.ToConstant(source);
4780     if (destination->IsRegister() || destination->IsStackSlot()) {
4781       Register dst =
4782           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
4783       switch (src.type()) {
4784         case Constant::kInt32:
4785 #if V8_TARGET_ARCH_S390X
4786           if (false) {
4787 #else
4788           if (RelocInfo::IsWasmReference(src.rmode())) {
4789 #endif
4790             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
4791           } else {
4792             __ Load(dst, Operand(src.ToInt32()));
4793           }
4794           break;
4795         case Constant::kInt64:
4796 #if V8_TARGET_ARCH_S390X
4797           if (RelocInfo::IsWasmReference(src.rmode())) {
4798             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
4799           } else {
4800             __ Load(dst, Operand(src.ToInt64()));
4801           }
4802 #else
4803           __ mov(dst, Operand(src.ToInt64()));
4804 #endif  // V8_TARGET_ARCH_S390X
4805           break;
4806         case Constant::kFloat32:
4807           __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
4808           break;
4809         case Constant::kFloat64:
4810           __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
4811           break;
4812         case Constant::kExternalReference:
4813           __ Move(dst, src.ToExternalReference());
4814           break;
4815         case Constant::kDelayedStringConstant:
4816           __ mov(dst, Operand::EmbeddedStringConstant(
4817                           src.ToDelayedStringConstant()));
4818           break;
4819         case Constant::kHeapObject: {
4820           Handle<HeapObject> src_object = src.ToHeapObject();
4821           RootIndex index;
4822           if (IsMaterializableFromRoot(src_object, &index)) {
4823             __ LoadRoot(dst, index);
4824           } else {
4825             __ Move(dst, src_object);
4826           }
4827           break;
4828         }
4829         case Constant::kCompressedHeapObject: {
4830           Handle<HeapObject> src_object = src.ToHeapObject();
4831           RootIndex index;
4832           if (IsMaterializableFromRoot(src_object, &index)) {
4833             __ LoadRoot(dst, index);
4834           } else {
4835             __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
4836           }
4837           break;
4838         }
4839         case Constant::kRpoNumber:
4840           UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
4841           break;
4842       }
4843       if (destination->IsStackSlot()) {
4844         __ StoreP(dst, g.ToMemOperand(destination), r0);
4845       }
4846     } else {
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32)
                         ? src.ToFloat32()
                         : src.ToFloat64().value();
      if (src.type() == Constant::kFloat32) {
        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
      } else {
        __ LoadDoubleLiteral(dst, value, kScratchReg);
      }

      if (destination->IsFloatStackSlot()) {
        __ StoreFloat32(dst, g.ToMemOperand(destination));
      } else if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
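    // FP/SIMD register source: move register-to-register, or store to the
    // destination stack slot with a width matching the representation.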
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      if (destination->IsSimd128Register()) {
        __ vlr(g.ToSimd128Register(destination), g.ToSimd128Register(source),
               Condition(0), Condition(0), Condition(0));
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        __ StoreSimd128(g.ToSimd128Register(source),
                        g.ToMemOperand(destination), kScratchReg);
      }
    } else {
      DoubleRegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        DoubleRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        LocationOperand* op = LocationOperand::cast(source);
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(src, g.ToMemOperand(destination));
        } else {
          __ StoreFloat32(src, g.ToMemOperand(destination));
        }
      }
    }
  } else if (source->IsFPStackSlot()) {
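    // FP/SIMD stack-slot source: load into the destination register, or copy
    // slot-to-slot through the scratch registers.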
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src);
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadFloat32(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        __ LoadSimd128(g.ToSimd128Register(destination), g.ToMemOperand(source),
                       kScratchReg);
      }
    } else {
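      // Memory-to-memory move staged through the scratch FP register.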
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src);
        __ StoreDouble(temp, g.ToMemOperand(destination));
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadFloat32(temp, src);
        __ StoreFloat32(temp, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        __ LoadSimd128(kScratchDoubleReg, g.ToMemOperand(source), kScratchReg);
        __ StoreSimd128(kScratchDoubleReg, g.ToMemOperand(destination),
                        kScratchReg);
      }
    }
  } else {
    UNREACHABLE();
  }
}

// Swaps the contents of source and destination.
// Source and destination may be one of:
//   Register,
//   FloatRegister,
//   DoubleRegister,
//   Simd128Register,
//   StackSlot,
//   FloatStackSlot,
//   DoubleStackSlot,
//   or Simd128StackSlot.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ SwapP(src, g.ToRegister(destination), kScratchReg);
    } else {
      DCHECK(destination->IsStackSlot());
      __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
    }
  } else if (source->IsStackSlot()) {
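    // Memory-to-memory swap of GP slots; needs kScratchReg and r0 as temps.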
    DCHECK(destination->IsStackSlot());
    __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
             r0);
  } else if (source->IsFloatRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFloatRegister()) {
      __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsFloatStackSlot());
      __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsFloatStackSlot()) {
    DCHECK(destination->IsFloatStackSlot());
    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
                  kScratchDoubleReg);
  } else if (source->IsSimd128Register()) {
    Simd128Register src = g.ToSimd128Register(source);
    if (destination->IsSimd128Register()) {
      __ SwapSimd128(src, g.ToSimd128Register(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsSimd128StackSlot());
      __ SwapSimd128(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsSimd128StackSlot()) {
    DCHECK(destination->IsSimd128StackSlot());
    __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg);
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
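  // Emit each target's label address as data, forming the jump table inline
  // in the code stream.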
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8