// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/wrappers.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/execution/frame-constants.h"

namespace v8 {
namespace internal {
namespace compiler {

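// OperandMode is a bit set describing which operand shapes an instruction
// accepts: the kShift*/kInt*/kUint* bits name the immediate ranges that are
// valid, and the kAllow* bits name the encodable instruction formats
// (R = register, I = immediate, M = memory; e.g. kAllowRRI is a
// three-operand register-register-immediate form). The helpers below test
// and narrow these sets while selecting instructions.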
enum class OperandMode : uint32_t {
  kNone = 0u,
  // Immediate modes
  kShift32Imm = 1u << 0,
  kShift64Imm = 1u << 1,
  kInt32Imm = 1u << 2,
  kInt32Imm_Negate = 1u << 3,
  kUint32Imm = 1u << 4,
  kInt20Imm = 1u << 5,
  kUint12Imm = 1u << 6,
  // Instruction formats
  kAllowRRR = 1u << 7,
  kAllowRM = 1u << 8,
  kAllowRI = 1u << 9,
  kAllowRRI = 1u << 10,
  kAllowRRM = 1u << 11,
  // Useful combinations
  kAllowImmediate = kAllowRI | kAllowRRI,
  kAllowMemoryOperand = kAllowRM | kAllowRRM,
  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
  kBitWiseCommonMode = kAllowRI,
  kArithmeticCommonMode = kAllowRM | kAllowRI
};

using OperandModes = base::Flags<OperandMode, uint32_t>;
DEFINE_OPERATORS_FOR_FLAGS(OperandModes)
OperandModes immediateModeMask =
    OperandMode::kShift32Imm | OperandMode::kShift64Imm |
    OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
    OperandMode::kUint32Imm | OperandMode::kInt20Imm;

#define AndCommonMode \
  ((OperandMode::kAllowRM | \
    (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
                                            : OperandMode::kNone)))
#define And64OperandMode AndCommonMode
#define Or64OperandMode And64OperandMode
#define Xor64OperandMode And64OperandMode

#define And32OperandMode \
  (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
#define Or32OperandMode And32OperandMode
#define Xor32OperandMode And32OperandMode

#define Shift32OperandMode \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS) \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
         : OperandMode::kNone)))

#define Shift64OperandMode \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
    OperandMode::kAllowRRR | OperandMode::kAllowRRI))

#define AddOperandMode \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS) \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
         : OperandMode::kArithmeticCommonMode)))
#define SubOperandMode \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
    (CpuFeatures::IsSupported(DISTINCT_OPS) \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
         : OperandMode::kArithmeticCommonMode)))
#define MulOperandMode \
  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)

// Adds S390-specific methods for generating operands.
class S390OperandGenerator final : public OperandGenerator {
 public:
  explicit S390OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, OperandModes mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  InstructionOperand UseAnyExceptImmediate(Node* node) {
    if (NodeProperties::IsConstant(node))
      return UseRegister(node);
    else
      return Use(node);
  }

  int64_t GetImmediate(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant)
      return OpParameter<int32_t>(node->op());
    else if (node->opcode() == IrOpcode::kInt64Constant)
      return OpParameter<int64_t>(node->op());
    else
      UNIMPLEMENTED();
  }

  bool CanBeImmediate(Node* node, OperandModes mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node->op());
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node->op());
    else
      return false;
    return CanBeImmediate(value, mode);
  }

  bool CanBeImmediate(int64_t value, OperandModes mode) {
    if (mode & OperandMode::kShift32Imm)
      return 0 <= value && value < 32;
    else if (mode & OperandMode::kShift64Imm)
      return 0 <= value && value < 64;
    else if (mode & OperandMode::kInt32Imm)
      return is_int32(value);
    else if (mode & OperandMode::kInt32Imm_Negate)
      return is_int32(-value);
    else if (mode & OperandMode::kUint32Imm)
      return is_uint32(value);
    else if (mode & OperandMode::kInt20Imm)
      return is_int20(value);
    else if (mode & OperandMode::kUint12Imm)
      return is_uint12(value);
    else
      return false;
  }

  bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
                          int effect_level) {
    if ((input->opcode() != IrOpcode::kLoad &&
         input->opcode() != IrOpcode::kLoadImmutable) ||
        !selector()->CanCover(user, input)) {
      return false;
    }

    if (effect_level != selector()->GetEffectLevel(input)) {
      return false;
    }

    MachineRepresentation rep =
        LoadRepresentationOf(input->op()).representation();
    switch (opcode) {
      case kS390_Cmp64:
      case kS390_LoadAndTestWord64:
        return rep == MachineRepresentation::kWord64 ||
               (!COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
      case kS390_LoadAndTestWord32:
      case kS390_Cmp32:
        return rep == MachineRepresentation::kWord32 ||
               (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
      default:
        break;
    }
    return false;
  }

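  // Builds the input operand list for a memory access from an optional base
  // register, optional index register, and optional displacement, and
  // returns the matching addressing mode: kMode_MR (base only), kMode_MRI
  // (base + displacement), kMode_MRR (base + index), or kMode_MRRI
  // (base + index + displacement).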
  AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
                                             Node* displacement,
                                             DisplacementMode displacement_mode,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != nullptr) {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          mode = kMode_MRRI;
        } else {
          mode = kMode_MRR;
        }
      } else {
        if (displacement == nullptr) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK_NOT_NULL(index);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != nullptr) {
        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                       ? UseNegatedImmediate(displacement)
                                       : UseImmediate(displacement);
        mode = kMode_MRI;
      } else {
        mode = kMode_MR;
      }
    }
    return mode;
  }

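  // Decomposes |operand| into base, index, and displacement with the
  // BaseWithIndexAndDisplacement matcher. If the displacement does not fit
  // |immediate_mode|, falls back to a plain base+index (kMode_MRR) form with
  // both inputs in registers.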
  AddressingMode GetEffectiveAddressMemoryOperand(
      Node* operand, InstructionOperand inputs[], size_t* input_count,
      OperandModes immediate_mode = OperandMode::kInt20Imm) {
#if V8_TARGET_ARCH_S390X
    BaseWithIndexAndDisplacement64Matcher m(operand,
                                            AddressOption::kAllowInputSwap);
#else
    BaseWithIndexAndDisplacement32Matcher m(operand,
                                            AddressOption::kAllowInputSwap);
#endif
    DCHECK(m.matches());
    if ((m.displacement() == nullptr ||
         CanBeImmediate(m.displacement(), immediate_mode))) {
      DCHECK_EQ(0, m.scale());
      return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
                                         m.displacement_mode(), inputs,
                                         input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MRR;
    }
  }

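  // For commutative binops, a node that is not live past this use can safely
  // be placed on the left and clobbered by a two-operand instruction
  // (DefineSameAsFirst), avoiding a register copy.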
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }

  MachineRepresentation GetRepresentation(Node* node) {
    return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
  }

  bool Is64BitOperand(Node* node) {
    return MachineRepresentation::kWord64 == GetRepresentation(node);
  }
};

namespace {

bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
  switch (opcode) {
    case kS390_AddFloat:
    case kS390_AddDouble:
    case kS390_CmpFloat:
    case kS390_CmpDouble:
    case kS390_Float32ToDouble:
      return true;
    default:
      return false;
  }
}

bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
  ArchOpcode opcode = ArchOpcodeField::decode(op);
  return S390OpcodeOnlySupport12BitDisp(opcode);
}

#define OpcodeImmMode(op) \
  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
                                      : OperandMode::kInt20Imm)

ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kS390_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kS390_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kS390_LoadWordU32;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:
    case MachineRepresentation::kSandboxedPointer:  // Fall through.
#ifdef V8_COMPRESS_POINTERS
      opcode = kS390_LoadWordS32;
      break;
#else
      UNREACHABLE();
#endif
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      opcode = kS390_LoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
      opcode = kS390_LoadDecompressTaggedPointer;
      break;
    case MachineRepresentation::kTagged:
      opcode = kS390_LoadDecompressAnyTagged;
      break;
#else
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord64:
      opcode = kS390_LoadWord64;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kS390_LoadSimd128;
      break;
    case MachineRepresentation::kMapWord:  // Fall through.
    case MachineRepresentation::kNone:
    default:
      UNREACHABLE();
  }
  return opcode;
}

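// On 64-bit S390, 32-bit operations do not clear the upper 32 bits of the
// destination register. Nodes in the list below produce results known to fit
// in 32 bits; when such a result must be zero-extended to 64 bits,
// VisitUnaryOp/VisitBinOp append an extra immediate input telling the code
// generator whether an explicit zero-extension is still required (see
// DoZeroExtForResult below).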
#define RESULT_IS_WORD32_LIST(V) \
  /* Float unary op */ \
  V(BitcastFloat32ToInt32) \
  /* V(TruncateFloat64ToWord32) */ \
  V(RoundFloat64ToInt32) \
  V(TruncateFloat32ToInt32) \
  V(TruncateFloat32ToUint32) \
  V(TruncateFloat64ToUint32) \
  V(ChangeFloat64ToInt32) \
  V(ChangeFloat64ToUint32) \
  /* Word32 unary op */ \
  V(Word32Clz) \
  V(Word32Popcnt) \
  V(Float64ExtractLowWord32) \
  V(Float64ExtractHighWord32) \
  V(SignExtendWord8ToInt32) \
  V(SignExtendWord16ToInt32) \
  /* Word32 bin op */ \
  V(Int32Add) \
  V(Int32Sub) \
  V(Int32Mul) \
  V(Int32AddWithOverflow) \
  V(Int32SubWithOverflow) \
  V(Int32MulWithOverflow) \
  V(Int32MulHigh) \
  V(Uint32MulHigh) \
  V(Int32Div) \
  V(Uint32Div) \
  V(Int32Mod) \
  V(Uint32Mod) \
  V(Word32Ror) \
  V(Word32And) \
  V(Word32Or) \
  V(Word32Xor) \
  V(Word32Shl) \
  V(Word32Shr) \
  V(Word32Sar)

bool ProduceWord32Result(Node* node) {
#if !V8_TARGET_ARCH_S390X
  return true;
#else
  switch (node->opcode()) {
#define VISITOR(name) case IrOpcode::k##name:
    RESULT_IS_WORD32_LIST(VISITOR)
#undef VISITOR
    return true;
    // TODO(john.yan): consider the following case to be valid
    // case IrOpcode::kWord32Equal:
    // case IrOpcode::kInt32LessThan:
    // case IrOpcode::kInt32LessThanOrEqual:
    // case IrOpcode::kUint32LessThan:
    // case IrOpcode::kUint32LessThanOrEqual:
    // case IrOpcode::kUint32MulHigh:
    //   // These 32-bit operations implicitly zero-extend to 64-bit on x64,
    //   // so the zero-extension is a no-op.
    //   return true;
    // case IrOpcode::kProjection: {
    //   Node* const value = node->InputAt(0);
    //   switch (value->opcode()) {
    //     case IrOpcode::kInt32AddWithOverflow:
    //     case IrOpcode::kInt32SubWithOverflow:
    //     case IrOpcode::kInt32MulWithOverflow:
    //       return true;
    //     default:
    //       return false;
    //   }
    // }
    case IrOpcode::kLoad:
    case IrOpcode::kLoadImmutable: {
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord32:
          return true;
        case MachineRepresentation::kWord8:
          if (load_rep.IsSigned())
            return false;
          else
            return true;
        default:
          return false;
      }
    }
    default:
      return false;
  }
#endif
}

static inline bool DoZeroExtForResult(Node* node) {
#if V8_TARGET_ARCH_S390X
  return ProduceWord32Result(node);
#else
  return false;
#endif
}

// TODO(john.yan): Create VisitShift to match dst = src shift (R+I)
#if 0
void VisitShift() { }
#endif

#if V8_TARGET_ARCH_S390X
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  S390OperandGenerator g(selector);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif

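// Chooses the best encoding for the right operand of an operation: an
// immediate if the mode allows it and the value fits, a memory operand if
// the right input is a coverable load, and a register otherwise. The choice
// also narrows *operand_mode so that later decisions (e.g. DefineSameAsFirst
// vs. DefineAsRegister) stay consistent with the selected instruction format.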
template <class CanCombineWithLoad>
void GenerateRightOperands(InstructionSelector* selector, Node* node,
                           Node* right, InstructionCode* opcode,
                           OperandModes* operand_mode,
                           InstructionOperand* inputs, size_t* input_count,
                           CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);

  if ((*operand_mode & OperandMode::kAllowImmediate) &&
      g.CanBeImmediate(right, *operand_mode)) {
    inputs[(*input_count)++] = g.UseImmediate(right);
    // Can only be RI or RRI
    *operand_mode &= OperandMode::kAllowImmediate;
  } else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
    NodeMatcher mright(right);
    if (mright.IsLoad() && selector->CanCover(node, right) &&
        canCombineWithLoad(
            SelectLoadOpcode(LoadRepresentationOf(right->op())))) {
      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
          right, inputs, input_count, OpcodeImmMode(*opcode));
      *opcode |= AddressingModeField::encode(mode);
      *operand_mode &= ~OperandMode::kAllowImmediate;
      if (*operand_mode & OperandMode::kAllowRM)
        *operand_mode &= ~OperandMode::kAllowDistinctOps;
    } else if (*operand_mode & OperandMode::kAllowRM) {
      DCHECK(!(*operand_mode & OperandMode::kAllowRRM));
      inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
      // Cannot be an immediate
      *operand_mode &=
          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
    } else if (*operand_mode & OperandMode::kAllowRRM) {
      DCHECK(!(*operand_mode & OperandMode::kAllowRM));
      inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
      // Cannot be an immediate
      *operand_mode &= ~OperandMode::kAllowImmediate;
    } else {
      UNREACHABLE();
    }
  } else {
    inputs[(*input_count)++] = g.UseRegister(right);
    // Can only be RR or RRR
    *operand_mode &= OperandMode::kAllowRRR;
  }
}

template <class CanCombineWithLoad>
void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
                           Node* left, Node* right, InstructionCode* opcode,
                           OperandModes* operand_mode,
                           InstructionOperand* inputs, size_t* input_count,
                           CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);
  // The left operand is always a register.
  InstructionOperand const left_input = g.UseRegister(left);
  inputs[(*input_count)++] = left_input;

  if (left == right) {
    inputs[(*input_count)++] = left_input;
    // Can only be RR or RRR
    *operand_mode &= OperandMode::kAllowRRR;
  } else {
    GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
                          input_count, canCombineWithLoad);
  }
}

template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont,
                  CanCombineWithLoad canCombineWithLoad);

template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelector* selector, Node* node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuation* cont, CanCombineWithLoad canCombineWithLoad);

// Generate the following variations:
//   VisitWord32UnaryOp, VisitWord32BinOp,
//   VisitWord64UnaryOp, VisitWord64BinOp,
//   VisitFloat32UnaryOp, VisitFloat32BinOp,
//   VisitFloat64UnaryOp, VisitFloat64BinOp
#define VISIT_OP_LIST_32(V) \
  V(Word32, Unary, [](ArchOpcode opcode) { \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  }) \
  V(Word64, Unary, \
    [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; }) \
  V(Float32, Unary, \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
  V(Float64, Unary, \
    [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; }) \
  V(Word32, Bin, [](ArchOpcode opcode) { \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  }) \
  V(Float32, Bin, \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
  V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })

#if V8_TARGET_ARCH_S390X
#define VISIT_OP_LIST(V) \
  VISIT_OP_LIST_32(V) \
  V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
#else
#define VISIT_OP_LIST VISIT_OP_LIST_32
#endif

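// Each (type1, type2, canCombineWithLoad) entry expands to two overloads of
// Visit<type1><type2>Op: one that threads an explicit FlagsContinuation
// through, and a convenience wrapper that supplies an empty continuation.
// The lambda decides which load opcodes may be folded into the operation as
// a memory operand.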
#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \
  static inline void Visit##type1##type2##Op( \
      InstructionSelector* selector, Node* node, InstructionCode opcode, \
      OperandModes operand_mode, FlagsContinuation* cont) { \
    Visit##type2##Op(selector, node, opcode, operand_mode, cont, \
                     canCombineWithLoad); \
  } \
  static inline void Visit##type1##type2##Op( \
      InstructionSelector* selector, Node* node, InstructionCode opcode, \
      OperandModes operand_mode) { \
    FlagsContinuation cont; \
    Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
  }
VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS)
#undef DECLARE_VISIT_HELPER_FUNCTIONS
#undef VISIT_OP_LIST_32
#undef VISIT_OP_LIST

template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont,
                  CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  Node* input = node->InputAt(0);

  GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
                        &input_count, canCombineWithLoad);

  bool input_is_word32 = ProduceWord32Result(input);

  bool doZeroExt = DoZeroExtForResult(node);
  bool canEliminateZeroExt = input_is_word32;

  if (doZeroExt) {
    // Add zero-ext indication
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if (!cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure
    // that the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    if (doZeroExt && canEliminateZeroExt) {
      // We have to make sure the result and the left operand use the same
      // register.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelector* selector, Node* node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuation* cont,
                CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  if (node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode) &&
      (g.CanBeBetterLeftOperand(right))) {
    std::swap(left, right);
  }

  GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode,
                        inputs, &input_count, canCombineWithLoad);

  bool left_is_word32 = ProduceWord32Result(left);

  bool doZeroExt = DoZeroExtForResult(node);
  bool canEliminateZeroExt = left_is_word32;

  if (doZeroExt) {
    // Add zero-ext indication
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if ((operand_mode & OperandMode::kAllowDistinctOps) &&
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One way
      // to achieve that is to declare the output register as same-as-first.
      !cont->IsDeoptimize()) {
    if (doZeroExt && canEliminateZeroExt) {
      // We have to make sure the result and the left operand use the same
      // register.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

}  // namespace

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitAbortCSADcheck(Node* node) {
  S390OperandGenerator g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}

void InstructionSelector::VisitLoad(Node* node, Node* value,
                                    InstructionCode opcode) {
  S390OperandGenerator g(this);
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
  opcode |= AddressingModeField::encode(mode);
  Emit(opcode, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  InstructionCode opcode = SelectLoadOpcode(load_rep);
  VisitLoad(node, node, opcode);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

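// Emits a store. Tagged/compressed pointer stores that need a write barrier
// go through kArchStoreWithWriteBarrier with unique registers (the barrier's
// out-of-line code reuses the operands); all other stores pick a plain store
// opcode, folding a byte-reversal of the stored value into the store where
// possible.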
static void VisitGeneralStore(
    InstructionSelector* selector, Node* node, MachineRepresentation rep,
    WriteBarrierKind write_barrier_kind = kNoWriteBarrier) {
  S390OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedOrCompressedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'AddS64' instruction as well
    // as for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode;
    NodeMatcher m(value);
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kS390_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kS390_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kS390_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kS390_StoreWord16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kS390_StoreWord32;
        if (m.IsWord32ReverseBytes()) {
          opcode = kS390_StoreReverse32;
          value = value->InputAt(0);
        }
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:
      case MachineRepresentation::kSandboxedPointer:  // Fall through.
#ifdef V8_COMPRESS_POINTERS
        opcode = kS390_StoreCompressTagged;
        break;
#else
        UNREACHABLE();
#endif
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:
        opcode = kS390_StoreCompressTagged;
        break;
      case MachineRepresentation::kWord64:
        opcode = kS390_StoreWord64;
        if (m.IsWord64ReverseBytes()) {
          opcode = kS390_StoreReverse64;
          value = value->InputAt(0);
        }
        break;
      case MachineRepresentation::kSimd128:
        opcode = kS390_StoreSimd128;
        if (m.IsSimd128ReverseBytes()) {
          opcode = kS390_StoreReverseSimd128;
          value = value->InputAt(0);
        }
        break;
      case MachineRepresentation::kMapWord:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand = g.UseRegister(value);
    inputs[input_count++] = value_operand;
    selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
                   input_count, inputs);
  }
}

void InstructionSelector::VisitStore(Node* node) {
  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (FLAG_enable_unconditional_write_barriers &&
      CanBeTaggedOrCompressedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  VisitGeneralStore(this, node, rep, write_barrier_kind);
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  S390OperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif

#if V8_TARGET_ARCH_S390X
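// Returns true if |value| is a single contiguous run of 1-bits. On success,
// *mb receives the bit index of the run's most significant bit and *me the
// index of its least significant bit, counting from bit 0 = LSB; e.g. for
// value 0x00000FF0, *mb = 11 and *me = 4.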
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros64(value);
  int mask_lsb = base::bits::CountTrailingZeros64(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif

#if V8_TARGET_ARCH_S390X
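// Matches (x >> k) & mask and (x << k) & mask against a single
// rotate-then-clear instruction (presumably lowered to the z/Architecture
// RISBG family) when the mask is a contiguous run of ones, folding the shift
// into the rotate amount and clearing the bits outside [me, mb].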
void InstructionSelector::VisitWord64And(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasResolvedValue() &&
      IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().ResolvedValue();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          sh = (64 - sh) & 0x3F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kS390_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kS390_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        match = true;
        opcode = kS390_RotLeftAndClear64;
        mask = mb;
      }
      if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kS390_RotLeftAndClear64;
          mask = mb;
        }
        if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
}

void InstructionSelector::VisitWord64Shr(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask64(
            static_cast<uint64_t>(mleft.right().ResolvedValue()) >> sh, &mb,
            &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
}
#endif

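// Matches Word32Sar(Word32Shl(x, K), K) for K in {16, 24}: shifting left by
// K and then arithmetically right by the same K sign-extends the low
// (32 - K) bits, so the pair can be replaced with a single sign-extend
// instruction on the original value.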
static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
    InstructionSelector* selector, Node* node) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
      selector->Emit(kS390_SignExtendWord16ToInt32,
                     canEliminateZeroExt ? g.DefineSameAsFirst(node)
                                         : g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()),
                     g.TempImmediate(!canEliminateZeroExt));
      return true;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
      selector->Emit(kS390_SignExtendWord8ToInt32,
                     canEliminateZeroExt ? g.DefineSameAsFirst(node)
                                         : g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()),
                     g.TempImmediate(!canEliminateZeroExt));
      return true;
    }
  }
  return false;
}

void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
}

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord64) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse64 | AddressingModeField::encode(kMode_MRR),
           // TODO(miladfarca): either base or offset could be an immediate.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
           // TODO(john.yan): either base or offset could be an immediate.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kSimd128) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverseSimd128 | AddressingModeField::encode(kMode_MRR),
           // TODO(miladfar): either base or offset could be an immediate.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

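// Matches 0 - x and emits a dedicated negate instruction instead of a
// subtraction, preserving the zero-extension protocol described above.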
template <class Matcher, ArchOpcode neg_opcode>
static inline bool TryMatchNegFromSub(InstructionSelector* selector,
                                      Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
                "Provided opcode is not a Neg opcode.");
  if (m.left().Is(0)) {
    Node* value = m.right().node();
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(value);
    if (doZeroExt) {
      selector->Emit(neg_opcode,
                     canEliminateZeroExt ? g.DefineSameAsFirst(node)
                                         : g.DefineAsRegister(node),
                     g.UseRegister(value),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(neg_opcode, g.DefineAsRegister(node),
                     g.UseRegister(value));
    }
    return true;
  }
  return false;
}

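// Strength-reduces a multiplication by a power-of-two constant to a left
// shift by log2 of that constant.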
template <class Matcher, ArchOpcode shift_op>
bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
      base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(left);
    InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
                              CpuFeatures::IsSupported(DISTINCT_OPS))
                                 ? g.DefineAsRegister(node)
                                 : g.DefineSameAsFirst(node);

    if (doZeroExt) {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
    }
    return true;
  }
  return false;
}

template <ArchOpcode opcode>
static inline bool TryMatchInt32OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord32BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}

static inline bool TryMatchInt32AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
                                                  AddOperandMode);
}

static inline bool TryMatchInt32SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
                                                  SubOperandMode);
}

static inline bool TryMatchInt32MulWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
      TryMatchInt32OpWithOverflow<kS390_Mul32>(
          selector, node, OperandMode::kAllowRRR | OperandMode::kAllowRM);
    } else {
      FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
      VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
                       OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                       &cont);
    }
    return true;
  }
  return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
                                                                    node);
}

#if V8_TARGET_ARCH_S390X
template <ArchOpcode opcode>
static inline bool TryMatchInt64OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord64BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}

static inline bool TryMatchInt64AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
                                                  AddOperandMode);
}

static inline bool TryMatchInt64SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
                                                  SubOperandMode);
}
#endif

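// Matches the pair Float64InsertLowWord32/Float64InsertHighWord32 that
// together supply both halves of a double and emits a single
// kS390_DoubleConstruct that builds the double from the two word32 values.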
static inline bool TryMatchDoubleConstructFromInsert(
    InstructionSelector* selector, Node* node) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Node* lo32 = nullptr;
  Node* hi32 = nullptr;

  if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
    lo32 = right;
  } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) {
    hi32 = right;
  } else {
    return false;  // doesn't match
  }

  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) {
    lo32 = left->InputAt(1);
  } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) {
    hi32 = left->InputAt(1);
  } else {
    return false;  // doesn't match
  }

  if (!lo32 || !hi32) return false;  // doesn't match

  selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node),
                 g.UseRegister(hi32), g.UseRegister(lo32));
  return true;
}

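// In the op lists below, the last column is a 'try_extra' callback that gets
// a chance to match a better instruction sequence before the generic visitor
// runs; 'null' is the no-op callback that always declines.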
1293 #define null ([]() { return false; })
1294 // TODO(john.yan): place kAllowRM where available
1295 #define FLOAT_UNARY_OP_LIST_32(V) \
1296 V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble, \
1297 OperandMode::kAllowRM, null) \
1298 V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32, \
1299 OperandMode::kAllowRM, null) \
1300 V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32, \
1301 OperandMode::kNone, null) \
1302 V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI, \
1303 OperandMode::kNone, null) \
1304 V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
1305 null) \
1306 V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32, \
1307 OperandMode::kNone, null) \
1308 V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
1309 null) \
1310 V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone, \
1311 null) \
1312 V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone, \
1313 null) \
1314 V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null) \
1315 V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null) \
1316 V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null) \
1317 V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null) \
1318 V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null) \
1319 V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null) \
1320 V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null) \
1321 V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null) \
1322 V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone, \
1323 null) \
1324 V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone, \
1325 null) \
1326 V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone, \
1327 null) \
1328 V(Float32, Float32RoundTiesEven, kS390_FloatNearestInt, OperandMode::kNone, \
1329 null) \
1330 V(Float64, Float64RoundTiesEven, kS390_DoubleNearestInt, OperandMode::kNone, \
1331 null) \
1332 V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null) \
1333 V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null) \
1334 /* TODO(john.yan): can use kAllowRM */ \
1335 V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32, \
1336 OperandMode::kNone, null) \
1337 V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32, \
1338 OperandMode::kNone, null)
1339
1340 #define FLOAT_BIN_OP_LIST(V) \
1341 V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null) \
1342 V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
1343 V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null) \
1344 V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
1345 V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null) \
1346 V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
1347 V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null) \
1348 V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null) \
1349 V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null) \
1350 V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null) \
1351 V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null) \
1352 V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)
1353
1354 #define WORD32_UNARY_OP_LIST_32(V) \
1355 V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null) \
1356 V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null) \
1357 V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone, \
1358 null) \
1359 V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
1360 null) \
1361 V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone, \
1362 null) \
1363 V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
1364 null) \
1365 V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32, \
1366 OperandMode::kNone, null) \
1367 V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32, \
1368 OperandMode::kNone, null) \
1369 V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
1370 OperandMode::kNone, null)
1371
1372 #ifdef V8_TARGET_ARCH_S390X
1373 #define FLOAT_UNARY_OP_LIST(V) \
1374 FLOAT_UNARY_OP_LIST_32(V) \
1375 V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
1376 null) \
1377 V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
1378 null) \
1379 V(Float64, TruncateFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
1380 null) \
1381 V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
1382 OperandMode::kNone, null)
1383
1384 #define WORD32_UNARY_OP_LIST(V) \
1385 WORD32_UNARY_OP_LIST_32(V) \
1386 V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64, \
1387 OperandMode::kNone, null) \
1388 V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64, \
1389 OperandMode::kNone, null) \
1390 V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64, \
1391 OperandMode::kNone, null) \
1392 V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64, \
        OperandMode::kNone, null)                                            \
  V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone,  \
    [&]() -> bool {                                                          \
      if (ProduceWord32Result(node->InputAt(0))) {                           \
        EmitIdentity(node);                                                  \
        return true;                                                         \
      }                                                                      \
      return false;                                                          \
    })

#else
#define FLOAT_UNARY_OP_LIST(V) FLOAT_UNARY_OP_LIST_32(V)
#define WORD32_UNARY_OP_LIST(V) WORD32_UNARY_OP_LIST_32(V)
#endif

#define WORD32_BIN_OP_LIST(V)                                                \
  V(Word32, Int32Add, kS390_Add32, AddOperandMode, null)                     \
  V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() {                  \
      return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node); \
    }))                                                                      \
  V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() {                  \
      return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(     \
          this, node);                                                       \
    }))                                                                      \
  V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode,               \
    ([&]() { return TryMatchInt32AddWithOverflow(this, node); }))            \
  V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode,               \
    ([&]() { return TryMatchInt32SubWithOverflow(this, node); }))            \
  V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode,               \
    ([&]() { return TryMatchInt32MulWithOverflow(this, node); }))            \
  V(Word32, Int32MulHigh, kS390_MulHigh32,                                   \
    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null)           \
  V(Word32, Uint32MulHigh, kS390_MulHighU32,                                 \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word32, Int32Div, kS390_Div32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word32, Uint32Div, kS390_DivU32,                                         \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word32, Int32Mod, kS390_Mod32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word32, Uint32Mod, kS390_ModU32,                                         \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word32, Word32Ror, kS390_RotRight32,                                     \
    OperandMode::kAllowRI | OperandMode::kAllowRRR |                         \
        OperandMode::kAllowRRI | OperandMode::kShift32Imm,                   \
    null)                                                                    \
  V(Word32, Word32And, kS390_And32, And32OperandMode, null)                  \
  V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null)                     \
  V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null)                  \
  V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null)          \
  V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null)         \
  V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode,          \
    [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); })   \
  V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32,             \
    OperandMode::kAllowRRR,                                                  \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); }) \
  V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32,           \
    OperandMode::kAllowRRR,                                                  \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })

#define WORD64_UNARY_OP_LIST(V)                                              \
  V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null)          \
  V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null)              \
  V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, ChangeInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone,  \
    null)                                                                    \
  V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble,               \
    OperandMode::kNone, null)

#define WORD64_BIN_OP_LIST(V)                                                \
  V(Word64, Int64Add, kS390_Add64, AddOperandMode, null)                     \
  V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() {                  \
      return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node); \
    }))                                                                      \
  V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode,               \
    ([&]() { return TryMatchInt64AddWithOverflow(this, node); }))            \
  V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode,               \
    ([&]() { return TryMatchInt64SubWithOverflow(this, node); }))            \
  V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() {                  \
      return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(     \
          this, node);                                                       \
    }))                                                                      \
  V(Word64, Int64Div, kS390_Div64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word64, Uint64Div, kS390_DivU64,                                         \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word64, Int64Mod, kS390_Mod64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word64, Uint64Mod, kS390_ModU64,                                         \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                   \
  V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null)    \
  V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null)           \
  V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null)                     \
  V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)

#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
  void InstructionSelector::Visit##name(Node* node) {     \
    if (std::function<bool()>(try_extra)()) return;       \
    Visit##type##UnaryOp(this, node, op, mode);           \
  }

#define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
  void InstructionSelector::Visit##name(Node* node) {   \
    if (std::function<bool()>(try_extra)()) return;     \
    Visit##type##BinOp(this, node, op, mode);           \
  }
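
// For illustration (not part of the expansion machinery): a list entry such
// as
//   V(Word32, Int32Add, kS390_Add32, AddOperandMode, null)
// expands via DECLARE_BIN_OP into roughly
//   void InstructionSelector::VisitInt32Add(Node* node) {
//     if (std::function<bool()>(null)()) return;
//     VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode);
//   }
// where 'null' is presumably the always-false fallback lambda defined
// earlier in this file, so entries without a matcher fall straight through
// to the generic visitor.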
1508
1509 WORD32_BIN_OP_LIST(DECLARE_BIN_OP)
WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP)1510 WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP)
1511 FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP)
1512 FLOAT_BIN_OP_LIST(DECLARE_BIN_OP)
1513
1514 #if V8_TARGET_ARCH_S390X
1515 WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
1516 WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
1517 #endif
1518
1519 #undef DECLARE_BIN_OP
1520 #undef DECLARE_UNARY_OP
1521 #undef WORD64_BIN_OP_LIST
1522 #undef WORD64_UNARY_OP_LIST
1523 #undef WORD32_BIN_OP_LIST
1524 #undef WORD32_UNARY_OP_LIST
1525 #undef FLOAT_UNARY_OP_LIST
1526 #undef WORD32_UNARY_OP_LIST_32
1527 #undef FLOAT_BIN_OP_LIST
1528 #undef FLOAT_BIN_OP_LIST_32
1529 #undef null
1530
1531 #if V8_TARGET_ARCH_S390X
1532 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1533 VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
1534 }
1535
VisitTryTruncateFloat64ToInt64(Node * node)1536 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1537 VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
1538 }
1539
VisitTryTruncateFloat32ToUint64(Node * node)1540 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1541 VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
1542 }
1543
VisitTryTruncateFloat64ToUint64(Node * node)1544 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1545 VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
1546 }
1547
1548 #endif
1549
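// A minimal note (an inference from the DCHECKs below, not from the original
// comments): with 31-bit Smis and pointer compression, the upper 32 bits of
// the word are never observed, so the bitcast can simply reuse the input
// register via EmitIdentity instead of emitting an extend.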
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  DCHECK(SmiValuesAre31Bits());
  DCHECK(COMPRESS_POINTERS_BOOL);
  EmitIdentity(node);
}

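// Float64Mod has no single S390 instruction; judging by MarkAsCall and the
// fixed d1/d2 registers below, it is lowered as a call out to a runtime
// helper (presumably fmod) with a fixed FP-register calling convention.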
void InstructionSelector::VisitFloat64Mod(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

static bool CompareLogical(FlagsContinuation* cont) {
  switch (cont->condition()) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
      return true;
    default:
      return false;
  }
}

namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
                      Node* node, Node* value, FlagsContinuation* cont,
                      bool discard_output = false);

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      OperandModes immediate_mode) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
         node->opcode() == IrOpcode::kInt32Sub ||
         node->opcode() == IrOpcode::kInt64Sub);

  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t input_count = 0;
  size_t output_count = 0;

  // If one of the two inputs is an immediate or a memory operand, make sure
  // it ends up on the right, since that is the only position the code below
  // can use it in.
  int effect_level = selector->GetEffectLevel(node, cont);

  if ((!g.CanBeImmediate(right, immediate_mode) &&
       g.CanBeImmediate(left, immediate_mode)) ||
      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Check for a compare against zero: a load-and-test sets the condition
  // code without needing a separate compare instruction.
  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
                                   ? kS390_LoadAndTestWord32
                                   : kS390_LoadAndTestWord64;
    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
  }

  inputs[input_count++] = g.UseRegister(left);
  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
    // Generate a memory operand for the right-hand side.
    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
        right, inputs, &input_count, OpcodeImmMode(opcode));
    opcode |= AddressingModeField::encode(addressing_mode);
  } else if (g.CanBeImmediate(right, immediate_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    inputs[input_count++] = g.UseAnyExceptImmediate(right);
  }

  DCHECK(input_count <= 8 && output_count <= 1);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
}

#if V8_TARGET_ARCH_S390X
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
}
#endif

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
}

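// Illustrative example (an assumption about typical input patterns, not
// taken from the original comments): a graph such as
//   Word32Equal(Word32And(x, 0xFF), 0)
// reaches this routine via VisitWordCompareZero and becomes a single
// kS390_Tst32 ("test under mask") of x against the immediate 0xFF, with the
// continuation reading the condition code directly.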
void VisitTestUnderMask(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  DCHECK(node->opcode() == IrOpcode::kWord32And ||
         node->opcode() == IrOpcode::kWord64And);
  ArchOpcode opcode =
      (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
      g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left),
               g.UseOperand(right, OperandMode::kUint32Imm), cont);
}

void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
                      Node* node, Node* value, FlagsContinuation* cont,
                      bool discard_output) {
  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
                "LoadAndTest opcodes must be consecutive, with no other "
                "opcodes in between.");

  // TODO(john.yan): Add support for Float32/Float64.
  DCHECK(opcode >= kS390_LoadAndTestWord32 &&
         opcode <= kS390_LoadAndTestWord64);

  S390OperandGenerator g(selector);
  InstructionOperand inputs[8];
  InstructionOperand outputs[2];
  size_t input_count = 0;
  size_t output_count = 0;
  bool use_value = false;

  int effect_level = selector->GetEffectLevel(node, cont);

  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
    // Generate a memory operand for the tested value.
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
    opcode |= AddressingModeField::encode(addressing_mode);
  } else {
    inputs[input_count++] = g.UseAnyExceptImmediate(value);
    use_value = true;
  }

  if (!discard_output && !use_value) {
    outputs[output_count++] = g.DefineAsRegister(value);
  }

  DCHECK(input_count <= 8 && output_count <= 2);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

}  // namespace

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
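  // For example (illustrative): Branch(Word32Equal(v, #0)) is folded by
  // negating the continuation and testing v itself; a double negation such
  // as Word32Equal(Word32Equal(v, #0), #0) cancels out over two iterations
  // of the loop below.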
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  FlagsCondition fc = cont->condition();
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt32Sub:
                return VisitWord32Compare(this, value, cont);
              case IrOpcode::kWord32And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord32Compare(this, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(this, value, cont);
              case IrOpcode::kWord64And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord64Compare(this, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Add32,
                                        AddOperandMode, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Sub32,
                                        SubOperandMode, cont);
              case IrOpcode::kInt32MulWithOverflow:
                if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
                  cont->OverwriteAndNegateIfEqual(kOverflow);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32,
                      OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
                } else {
                  cont->OverwriteAndNegateIfEqual(kNotEqual);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32WithOverflow,
                      OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                      cont);
                }
              case IrOpcode::kInt32AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32UnaryOp(this, node, kS390_Abs32,
                                          OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
              case IrOpcode::kInt64AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64UnaryOp(this, node, kS390_Abs64,
                                          OperandMode::kNone, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Add64,
                                        AddOperandMode, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Sub64,
                                        SubOperandMode, cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32Compare(this, value, cont);
        break;
      case IrOpcode::kWord32And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kLoad:
      case IrOpcode::kLoadImmutable: {
        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord32:
            return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user,
                                    value, cont);
          default:
            break;
        }
        break;
      }
      case IrOpcode::kInt32Add:
        // Can't handle the overflow case.
        break;
      case IrOpcode::kWord32Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Sar:
      case IrOpcode::kWord32Shl:
      case IrOpcode::kWord32Shr:
      case IrOpcode::kWord32Ror:
        // Doesn't set the condition code, so ignore.
        break;
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kInt64Sub:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64Compare(this, value, cont);
        break;
      case IrOpcode::kWord64And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kInt64Add:
        // Can't handle the overflow case.
        break;
      case IrOpcode::kWord64Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shl:
      case IrOpcode::kWord64Shr:
      case IrOpcode::kWord64Ror:
        // Doesn't set the condition code, so ignore.
        break;
#endif
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // The branch could not be combined with a compare, so emit a LoadAndTest.
  VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  S390OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
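    // Worked example of the heuristic below (illustrative numbers only): for
    // 4 cases spanning a value range of 10, the table costs 4 + 10 space and
    // 3 time units (weighted total 14 + 3 * 3 = 23), while the binary search
    // costs 3 + 2 * 4 space and 4 time units (weighted total 11 + 3 * 4 =
    // 23), so the two strategies break even; denser case sets favor the
    // table, sparser ones the search tree.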
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
             value_operand, g.TempImmediate(-sw.min_value()));
      }
#if V8_TARGET_ARCH_S390X
      InstructionOperand index_operand_zero_ext = g.TempRegister();
      Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
      index_operand = index_operand_zero_ext;
#endif
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}

void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(),
                            m.left().node(), &cont, true);
  }
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(),
                            m.left().node(), &cont, true);
  }
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
#endif

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  S390OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      if (input.node == nullptr) continue;
      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int stack_decrement = 0;
    for (PushParameter input : base::Reversed(*arguments)) {
      stack_decrement += kSystemPointerSize;
      // Skip any alignment holes in pushed nodes.
      if (input.node == nullptr) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
    }
  }
}

void InstructionSelector::VisitMemoryBarrier(Node* node) {
  S390OperandGenerator g(this);
  Emit(kArchNop, g.NoOutput());
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  VisitLoad(node, node, SelectLoadOpcode(load_rep));
}

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  VisitGeneralStore(this, node, store_params.representation());
}

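// All three inputs are constrained with UseUniqueRegister below. A plausible
// reading (not stated in the original) is that the code generator expands
// the exchange into a compare-and-swap retry loop, so base, index and value
// must not alias the loop's output or scratch registers.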
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode, AtomicWidth width) {
  S390OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kS390_Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}

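// Note the DefineSameAsFirst(node) output paired with old_value as the first
// input: this matches the semantics of the S390 CS/CSG compare-and-swap
// family, where a single register both supplies the expected value and
// receives the value observed in memory. (This is an inference from the
// operand constraints, not from the original comments.)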
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode, AtomicWidth width) {
  S390OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  inputs[input_count++] = g.UseUniqueRegister(base);

  AddressingMode addressing_mode;
  if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MRR;
  }

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineSameAsFirst(node);

  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, output_count, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;
  if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kS390_Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}

void VisitAtomicBinop(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode, AtomicWidth width) {
  S390OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);

  AddressingMode addressing_mode;
  if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MRR;
  }

  inputs[input_count++] = g.UseUniqueRegister(value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  InstructionOperand temps[1];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister();

  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
                 temps);
}

void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;

  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}

#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {          \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
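// For illustration: VISIT_ATOMIC_BINOP(Add) expands to
//   void InstructionSelector::VisitWord32AtomicAdd(Node* node) {
//     VisitWord32AtomicBinaryOperation(
//         node, kAtomicAddInt8, kAtomicAddUint8, kAtomicAddInt16,
//         kAtomicAddUint16, kAtomicAddWord32);
//   }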
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode word32_op, ArchOpcode word64_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;

  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = word32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = word64_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}

#define VISIT_ATOMIC64_BINOP(op)                                       \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {        \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,         \
                                     kAtomic##op##Uint16,              \
                                     kAtomic##op##Word32,              \
                                     kS390_Word64Atomic##op##Uint64);  \
  }
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
VISIT_ATOMIC64_BINOP(And)
VISIT_ATOMIC64_BINOP(Or)
VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP

void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  VisitLoad(node, node, SelectLoadOpcode(load_rep));
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  VisitGeneralStore(this, node, store_params.representation());
}

#define SIMD_TYPES(V) \
  V(F64x2)            \
  V(F32x4)            \
  V(I64x2)            \
  V(I32x4)            \
  V(I16x8)            \
  V(I8x16)

#define SIMD_BINOP_LIST(V) \
  V(F64x2Add)              \
  V(F64x2Sub)              \
  V(F64x2Mul)              \
  V(F64x2Div)              \
  V(F64x2Eq)               \
  V(F64x2Ne)               \
  V(F64x2Lt)               \
  V(F64x2Le)               \
  V(F64x2Min)              \
  V(F64x2Max)              \
  V(F64x2Pmin)             \
  V(F64x2Pmax)             \
  V(F32x4Add)              \
  V(F32x4Sub)              \
  V(F32x4Mul)              \
  V(F32x4Eq)               \
  V(F32x4Ne)               \
  V(F32x4Lt)               \
  V(F32x4Le)               \
  V(F32x4Div)              \
  V(F32x4Min)              \
  V(F32x4Max)              \
  V(F32x4Pmin)             \
  V(F32x4Pmax)             \
  V(I64x2Add)              \
  V(I64x2Sub)              \
  V(I64x2Mul)              \
  V(I64x2Eq)               \
  V(I64x2ExtMulLowI32x4S)  \
  V(I64x2ExtMulHighI32x4S) \
  V(I64x2ExtMulLowI32x4U)  \
  V(I64x2ExtMulHighI32x4U) \
  V(I64x2Ne)               \
  V(I64x2GtS)              \
  V(I64x2GeS)              \
  V(I64x2Shl)              \
  V(I64x2ShrS)             \
  V(I64x2ShrU)             \
  V(I32x4Add)              \
  V(I32x4Sub)              \
  V(I32x4Mul)              \
  V(I32x4MinS)             \
  V(I32x4MinU)             \
  V(I32x4MaxS)             \
  V(I32x4MaxU)             \
  V(I32x4Eq)               \
  V(I32x4Ne)               \
  V(I32x4GtS)              \
  V(I32x4GeS)              \
  V(I32x4GtU)              \
  V(I32x4GeU)              \
  V(I32x4ExtMulLowI16x8S)  \
  V(I32x4ExtMulHighI16x8S) \
  V(I32x4ExtMulLowI16x8U)  \
  V(I32x4ExtMulHighI16x8U) \
  V(I32x4Shl)              \
  V(I32x4ShrS)             \
  V(I32x4ShrU)             \
  V(I32x4DotI16x8S)        \
  V(I16x8Add)              \
  V(I16x8Sub)              \
  V(I16x8Mul)              \
  V(I16x8MinS)             \
  V(I16x8MinU)             \
  V(I16x8MaxS)             \
  V(I16x8MaxU)             \
  V(I16x8Eq)               \
  V(I16x8Ne)               \
  V(I16x8GtS)              \
  V(I16x8GeS)              \
  V(I16x8GtU)              \
  V(I16x8GeU)              \
  V(I16x8SConvertI32x4)    \
  V(I16x8UConvertI32x4)    \
  V(I16x8RoundingAverageU) \
  V(I16x8ExtMulLowI8x16S)  \
  V(I16x8ExtMulHighI8x16S) \
  V(I16x8ExtMulLowI8x16U)  \
  V(I16x8ExtMulHighI8x16U) \
  V(I16x8Shl)              \
  V(I16x8ShrS)             \
  V(I16x8ShrU)             \
  V(I8x16Add)              \
  V(I8x16Sub)              \
  V(I8x16MinS)             \
  V(I8x16MinU)             \
  V(I8x16MaxS)             \
  V(I8x16MaxU)             \
  V(I8x16Eq)               \
  V(I8x16Ne)               \
  V(I8x16GtS)              \
  V(I8x16GeS)              \
  V(I8x16GtU)              \
  V(I8x16GeU)              \
  V(I8x16SConvertI16x8)    \
  V(I8x16UConvertI16x8)    \
  V(I8x16RoundingAverageU) \
  V(I8x16Shl)              \
  V(I8x16ShrS)             \
  V(I8x16ShrU)             \
  V(S128And)               \
  V(S128Or)                \
  V(S128Xor)               \
  V(S128AndNot)

#define SIMD_BINOP_UNIQUE_REGISTER_LIST(V) \
  V(I16x8AddSatS)                          \
  V(I16x8SubSatS)                          \
  V(I16x8AddSatU)                          \
  V(I16x8SubSatU)                          \
  V(I16x8Q15MulRSatS)                      \
  V(I8x16AddSatS)                          \
  V(I8x16SubSatS)                          \
  V(I8x16AddSatU)                          \
  V(I8x16SubSatU)                          \
  V(I8x16Swizzle)

#define SIMD_UNOP_LIST(V)    \
  V(F64x2Abs)                \
  V(F64x2Neg)                \
  V(F64x2Sqrt)               \
  V(F64x2Ceil)               \
  V(F64x2Floor)              \
  V(F64x2Trunc)              \
  V(F64x2NearestInt)         \
  V(F64x2ConvertLowI32x4S)   \
  V(F64x2ConvertLowI32x4U)   \
  V(F64x2PromoteLowF32x4)    \
  V(F64x2Splat)              \
  V(F32x4Abs)                \
  V(F32x4Neg)                \
  V(F32x4RecipApprox)        \
  V(F32x4RecipSqrtApprox)    \
  V(F32x4Sqrt)               \
  V(F32x4Ceil)               \
  V(F32x4Floor)              \
  V(F32x4Trunc)              \
  V(F32x4NearestInt)         \
  V(F32x4DemoteF64x2Zero)    \
  V(F32x4SConvertI32x4)      \
  V(F32x4UConvertI32x4)      \
  V(F32x4Splat)              \
  V(I64x2Neg)                \
  V(I64x2SConvertI32x4Low)   \
  V(I64x2SConvertI32x4High)  \
  V(I64x2UConvertI32x4Low)   \
  V(I64x2UConvertI32x4High)  \
  V(I64x2Abs)                \
  V(I64x2BitMask)            \
  V(I64x2Splat)              \
  V(I64x2AllTrue)            \
  V(I32x4Neg)                \
  V(I32x4Abs)                \
  V(I32x4SConvertF32x4)      \
  V(I32x4UConvertF32x4)      \
  V(I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High)  \
  V(I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High)  \
  V(I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero) \
  V(I32x4BitMask)            \
  V(I32x4Splat)              \
  V(I32x4AllTrue)            \
  V(I16x8Neg)                \
  V(I16x8Abs)                \
  V(I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High)  \
  V(I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High)  \
  V(I16x8BitMask)            \
  V(I16x8Splat)              \
  V(I16x8AllTrue)            \
  V(I8x16Neg)                \
  V(I8x16Abs)                \
  V(I8x16Popcnt)             \
  V(I8x16BitMask)            \
  V(I8x16Splat)              \
  V(I8x16AllTrue)            \
  V(S128Not)                 \
  V(V128AnyTrue)

#define SIMD_UNOP_UNIQUE_REGISTER_LIST(V) \
  V(I32x4ExtAddPairwiseI16x8S)            \
  V(I32x4ExtAddPairwiseI16x8U)            \
  V(I16x8ExtAddPairwiseI8x16S)            \
  V(I16x8ExtAddPairwiseI8x16U)

#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                               \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) {  \
    S390OperandGenerator g(this);                                         \
    int32_t lane = OpParameter<int32_t>(node->op());                      \
    Emit(kS390_##Type##ExtractLane##Sign, g.DefineAsRegister(node),       \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));          \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    S390OperandGenerator g(this);                                  \
    int32_t lane = OpParameter<int32_t>(node->op());               \
    Emit(kS390_##Type##ReplaceLane, g.DefineAsRegister(node),      \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),    \
         g.UseRegister(node->InputAt(1)));                         \
  }
SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_BINOP(Opcode)                                            \
  void InstructionSelector::Visit##Opcode(Node* node) {                     \
    S390OperandGenerator g(this);                                           \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),                          \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST

#define SIMD_VISIT_BINOP_UNIQUE_REGISTER(Opcode)                          \
  void InstructionSelector::Visit##Opcode(Node* node) {                   \
    S390OperandGenerator g(this);                                         \
    InstructionOperand temps[] = {g.TempSimd128Register(),                \
                                  g.TempSimd128Register()};               \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),                        \
         g.UseUniqueRegister(node->InputAt(0)),                           \
         g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
  }
SIMD_BINOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_BINOP_UNIQUE_REGISTER)
#undef SIMD_VISIT_BINOP_UNIQUE_REGISTER
#undef SIMD_BINOP_UNIQUE_REGISTER_LIST

#define SIMD_VISIT_UNOP(Opcode)                         \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    S390OperandGenerator g(this);                       \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),      \
         g.UseRegister(node->InputAt(0)));              \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST

#define SIMD_VISIT_UNOP_UNIQUE_REGISTER(Opcode)                           \
  void InstructionSelector::Visit##Opcode(Node* node) {                   \
    S390OperandGenerator g(this);                                         \
    InstructionOperand temps[] = {g.TempSimd128Register()};               \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),                        \
         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
  }
SIMD_UNOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_UNOP_UNIQUE_REGISTER)
#undef SIMD_VISIT_UNOP_UNIQUE_REGISTER
#undef SIMD_UNOP_UNIQUE_REGISTER_LIST

#define SIMD_VISIT_QFMOP(Opcode)                                           \
  void InstructionSelector::Visit##Opcode(Node* node) {                    \
    S390OperandGenerator g(this);                                          \
    Emit(kS390_##Opcode, g.DefineSameAsFirst(node),                        \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
         g.UseRegister(node->InputAt(2)));                                 \
  }
SIMD_VISIT_QFMOP(F64x2Qfma)
SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP
#undef SIMD_TYPES

#if V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  S390OperandGenerator g(this);
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  // Remap the shuffle indices to match IBM lane numbering.
  int max_index = 15;
  int total_lane_count = 2 * kSimd128Size;
  uint8_t shuffle_remapped[kSimd128Size];
  for (int i = 0; i < kSimd128Size; i++) {
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
  }
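  // Example of the remapping (illustrative): with max_index 15 and 32 total
  // lanes, source index 0 maps to 15, 15 maps to 0, 16 maps to 31, and 31
  // maps to 16, i.e. each 16-byte half of the index space is reversed to
  // match IBM (big-endian) lane numbering.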
  Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
#else
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY

// This is a replica of SimdShuffle::Pack4Lanes. The original is not
// available on builds with WebAssembly disabled, so we declare a local copy
// here; it is also used by other visitors such as S128Const.
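// For example (illustrative): bytes {0x01, 0x02, 0x03, 0x04} pack into the
// immediate 0x04030201, since the loop below shifts in shuffle[3] first and
// shuffle[0] last.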
static int32_t Pack4Lanes(const uint8_t* shuffle) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i];
  }
  return result;
}

void InstructionSelector::VisitS128Const(Node* node) {
  S390OperandGenerator g(this);
  uint32_t val[kSimd128Size / sizeof(uint32_t)];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic
  // constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kS390_S128Zero, dst);
  } else if (all_ones) {
    Emit(kS390_S128AllOnes, dst);
  } else {
    // We have to use Pack4Lanes to reverse the bytes (lanes) on BE; on LE it
    // has no effect.
    Emit(kS390_S128Const, dst,
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]))),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 4)),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 8)),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 12)));
  }
}

void InstructionSelector::VisitS128Zero(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_S128Zero, g.DefineAsRegister(node));
}

void InstructionSelector::VisitS128Select(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_S128Select, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  S390OperandGenerator g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kS390_Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}

void InstructionSelector::VisitLoadLane(Node* node) {
  LoadLaneParameters params = LoadLaneParametersOf(node->op());
  InstructionCode opcode;
  if (params.rep == MachineType::Int8()) {
    opcode = kS390_S128Load8Lane;
  } else if (params.rep == MachineType::Int16()) {
    opcode = kS390_S128Load16Lane;
  } else if (params.rep == MachineType::Int32()) {
    opcode = kS390_S128Load32Lane;
  } else if (params.rep == MachineType::Int64()) {
    opcode = kS390_S128Load64Lane;
  } else {
    UNREACHABLE();
  }

  S390OperandGenerator g(this);
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
  InstructionOperand inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(node->InputAt(2));
  inputs[input_count++] = g.UseImmediate(params.laneidx);

  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  opcode |= AddressingModeField::encode(mode);
  Emit(opcode, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());
  ArchOpcode opcode;
  switch (params.transformation) {
    case LoadTransformation::kS128Load8Splat:
      opcode = kS390_S128Load8Splat;
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kS390_S128Load16Splat;
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kS390_S128Load32Splat;
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kS390_S128Load64Splat;
      break;
    case LoadTransformation::kS128Load8x8S:
      opcode = kS390_S128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kS390_S128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kS390_S128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kS390_S128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kS390_S128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kS390_S128Load32x2U;
      break;
    case LoadTransformation::kS128Load32Zero:
      opcode = kS390_S128Load32Zero;
      break;
    case LoadTransformation::kS128Load64Zero:
      opcode = kS390_S128Load64Zero;
      break;
    default:
      UNREACHABLE();
  }
  VisitLoad(node, node, opcode);
}

void InstructionSelector::VisitStoreLane(Node* node) {
  StoreLaneParameters params = StoreLaneParametersOf(node->op());
  InstructionCode opcode;
  if (params.rep == MachineRepresentation::kWord8) {
    opcode = kS390_S128Store8Lane;
  } else if (params.rep == MachineRepresentation::kWord16) {
    opcode = kS390_S128Store16Lane;
  } else if (params.rep == MachineRepresentation::kWord32) {
    opcode = kS390_S128Store32Lane;
  } else if (params.rep == MachineRepresentation::kWord64) {
    opcode = kS390_S128Store64Lane;
  } else {
    UNREACHABLE();
  }

  S390OperandGenerator g(this);
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
  InstructionOperand inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(node->InputAt(2));
  inputs[input_count++] = g.UseImmediate(params.laneidx);

  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  opcode |= AddressingModeField::encode(mode);
  Emit(opcode, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  S390OperandGenerator g(this);

  InstructionCode opcode = kS390_Float32ToInt32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
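  // The selector only records the kSetOverflowToMin request in MiscField;
  // presumably the code generator reads this bit and saturates the result
  // (e.g. to INT32_MIN) instead of leaving it unspecified on overflow. The
  // same applies to the Uint32 variant below.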

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  S390OperandGenerator g(this);

  InstructionCode opcode = kS390_Float32ToUint32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kInt32AbsWithOverflow |
         MachineOperatorBuilder::kInt64AbsWithOverflow |
         MachineOperatorBuilder::kWord64Popcnt;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8