// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/execution/ppc/frame-constants-ppc.h"

namespace v8 {
namespace internal {
namespace compiler {

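// Immediate operand constraints understood by the operand generator below.
// The kInt16Imm* modes model 16-bit immediate fields (the 4-byte-aligned
// variant matching DS-form displacements, whose low two bits must be zero),
// kInt34Imm the 34-bit displacements of Power10 prefixed instructions (hence
// the PPC_10_PLUS checks below), and kShift*Imm shift amounts.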
enum ImmediateMode {
  kInt16Imm,
  kInt16Imm_Unsigned,
  kInt16Imm_Negate,
  kInt16Imm_4ByteAligned,
  kShift32Imm,
  kInt34Imm,
  kShift64Imm,
  kNoImmediate
};

// Adds PPC-specific methods for generating operands.
class PPCOperandGenerator final : public OperandGenerator {
 public:
  explicit PPCOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node->op());
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node->op());
    else
      return false;
    return CanBeImmediate(value, mode);
  }

  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
    switch (mode) {
      case kInt16Imm:
        return is_int16(value);
      case kInt16Imm_Unsigned:
        return is_uint16(value);
      case kInt16Imm_Negate:
        return is_int16(-value);
      case kInt16Imm_4ByteAligned:
        return is_int16(value) && !(value & 3);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kInt34Imm:
        return is_int34(value);
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kNoImmediate:
        return false;
    }
    return false;
  }
};

namespace {

void VisitRR(InstructionSelector* selector, InstructionCode opcode,
             Node* node) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
              Node* node) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
              ImmediateMode operand_mode) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}

#if V8_TARGET_ARCH_PPC64
void VisitTryTruncateDouble(InstructionSelector* selector,
                            InstructionCode opcode, Node* node) {
  PPCOperandGenerator g(selector);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

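  // If the truncation has a kProjection(1) user, that projection receives a
  // success flag as a second register output of the same instruction.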
  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif

// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}

}  // namespace

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitAbortCSADcheck(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}

static void VisitLoadCommon(InstructionSelector* selector, Node* node,
                            LoadRepresentation load_rep) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  InstructionCode opcode = kArchNop;
  ImmediateMode mode;
  if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
    mode = kInt34Imm;
  } else {
    mode = kInt16Imm;
  }
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kPPC_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kPPC_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_LoadWordU32;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:
    case MachineRepresentation::kSandboxedPointer:  // Fall through.
#ifdef V8_COMPRESS_POINTERS
      opcode = kPPC_LoadWordS32;
      if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
      break;
#else
      UNREACHABLE();
#endif
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      opcode = kPPC_LoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
      opcode = kPPC_LoadDecompressTaggedPointer;
      break;
    case MachineRepresentation::kTagged:
      opcode = kPPC_LoadDecompressAnyTagged;
      break;
#else
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord64:
      opcode = kPPC_LoadWord64;
      if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kPPC_LoadSimd128;
      // Vectors do not support MRI mode, only MRR is available.
      mode = kNoImmediate;
      break;
    case MachineRepresentation::kMapWord:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
                    node->opcode() == IrOpcode::kWord64AtomicLoad);

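  // Prefer MRI (base register + immediate displacement) whenever either input
  // fits the immediate mode, swapping base and offset if necessary; otherwise
  // fall back to MRR (register + register) addressing.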
  if (g.CanBeImmediate(offset, mode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(offset), g.UseImmediate(is_atomic));
  } else if (g.CanBeImmediate(base, mode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), g.UseRegister(offset),
                   g.UseImmediate(base), g.UseImmediate(is_atomic));
  } else {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseRegister(offset), g.UseImmediate(is_atomic));
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  VisitLoadCommon(this, node, load_rep);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void VisitStoreCommon(InstructionSelector* selector, Node* node,
                      StoreRepresentation store_rep,
                      base::Optional<AtomicMemoryOrder> atomic_order) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  // TODO(miladfarca): maybe use atomic_order?
  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
                    node->opcode() == IrOpcode::kWord64AtomicStore);

  MachineRepresentation rep = store_rep.representation();
  WriteBarrierKind write_barrier_kind = kNoWriteBarrier;

  if (!is_atomic) {
    write_barrier_kind = store_rep.write_barrier_kind();
  }

  if (FLAG_enable_unconditional_write_barriers &&
      CanBeTaggedOrCompressedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

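  // Stores that need a write barrier go through kArchStoreWithWriteBarrier,
  // which may call an out-of-line record-write stub; base, offset and value
  // are therefore allocated with UseUniqueRegister so they stay distinct from
  // each other and from the temps used by that path.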
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedOrCompressedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, kInt16Imm)
#if V8_TARGET_ARCH_PPC64
        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
#endif
    ) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    CHECK_EQ(is_atomic, false);
    selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode;
    ImmediateMode mode = kInt16Imm;
    NodeMatcher m(value);
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kPPC_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kPPC_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kPPC_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kPPC_StoreWord16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kPPC_StoreWord32;
        if (m.IsWord32ReverseBytes()) {
          opcode = kPPC_StoreByteRev32;
          value = value->InputAt(0);
          mode = kNoImmediate;
        }
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:
      case MachineRepresentation::kSandboxedPointer:  // Fall through.
#ifdef V8_COMPRESS_POINTERS
        opcode = kPPC_StoreCompressTagged;
        break;
#else
        UNREACHABLE();
#endif
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:
        mode = kInt16Imm_4ByteAligned;
        opcode = kPPC_StoreCompressTagged;
        break;
      case MachineRepresentation::kWord64:
        opcode = kPPC_StoreWord64;
        mode = kInt16Imm_4ByteAligned;
        if (m.IsWord64ReverseBytes()) {
          opcode = kPPC_StoreByteRev64;
          value = value->InputAt(0);
          mode = kNoImmediate;
        }
        break;
      case MachineRepresentation::kSimd128:
        opcode = kPPC_StoreSimd128;
        // Vectors do not support MRI mode, only MRR is available.
        mode = kNoImmediate;
        break;
      case MachineRepresentation::kMapWord:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }

    if (g.CanBeImmediate(offset, mode)) {
      selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                     g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
                     g.UseRegister(value), g.UseImmediate(is_atomic));
    } else if (g.CanBeImmediate(base, mode)) {
      selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                     g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
                     g.UseRegister(value), g.UseImmediate(is_atomic));
    } else {
      selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
                     g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
                     g.UseRegister(value), g.UseImmediate(is_atomic));
    }
  }
}

void InstructionSelector::VisitStore(Node* node) {
  VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
                   base::nullopt);
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

// Architecture supports unaligned access, therefore VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used
// instead.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }

template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  PPCOperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kPPC_And:
      inv_opcode = kPPC_AndComplement;
      break;
    case kPPC_Or:
      inv_opcode = kPPC_OrComplement;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}

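// Returns true if |value| consists of a single contiguous run of 1 bits.
// On success, *mb receives the bit index (LSB = 0) of the run's most
// significant bit and *me the index of its least significant bit; these are
// the rotate-and-mask boundaries consumed by the rotate-and-mask opcodes
// used below.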
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}

#if V8_TARGET_ARCH_PPC64
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros64(value);
  int mask_lsb = base::bits::CountTrailingZeros64(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif

// TODO(mbrandy): Absorb rotate-right into rlwinm?
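// Worked example: for (x >>> 16) & 0xFF the mask yields mb = 7, me = 0, and
// the shift is re-expressed as a rotate-left by (32 - 16); a single
// rlwinm-style kPPC_RotLeftAndMask32 then replaces the shift-and-mask pair,
// since the bits rotated in from the low end fall outside the mask.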
void InstructionSelector::VisitWord32And(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasResolvedValue() &&
      IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rlwinm
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        left = mleft.left().node();
        sh = mleft.right().ResolvedValue();
        if (m.left().IsWord32Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 31 - sh) mb = 31 - sh;
          sh = (32 - sh) & 0x1F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
           g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
      return;
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb rotate-right into rldic?
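// Unlike rlwinm, the 64-bit rotate instructions encode only one mask
// boundary, so the combine is split across three opcodes: clear-left
// (rldicl-style) when the mask reaches bit 0, clear-right (rldicr-style)
// when it reaches bit 63, and rotate-left-and-clear (rldic-style) for a
// left-shifted low mask.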
void InstructionSelector::VisitWord64And(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasResolvedValue() &&
      IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rldic
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().ResolvedValue();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          sh = (64 - sh) & 0x3F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kPPC_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kPPC_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        match = true;
        opcode = kPPC_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif

void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif

void InstructionSelector::VisitWord32Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}

void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  PPCOperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}
#endif

void InstructionSelector::VisitWord32Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask32(mleft.right().ResolvedValue() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kPPC_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask32((uint32_t)(mleft.right().ResolvedValue()) >> sh, &mb,
                           &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 31 - sh) mb = 31 - sh;
      sh = (32 - sh) & 0x1F;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().ResolvedValue();
    int mb;
    int me;
    if (mleft.right().HasResolvedValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
                           &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Sar(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
}

#if !V8_TARGET_ARCH_PPC64
void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
                    InstructionCode opcode2, Node* node) {
  PPCOperandGenerator g(selector);

  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // We use UseUniqueRegister here to avoid register sharing with the output
    // registers.
    InstructionOperand inputs[] = {
        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {
        g.DefineAsRegister(node),
        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

    selector->Emit(opcode, 2, outputs, 4, inputs);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    selector->Emit(opcode2, g.DefineSameAsFirst(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(2)));
  }
}

void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
}

void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kPPC_SubPair, kPPC_Sub, node);
}

void InstructionSelector::VisitInt32PairMul(Node* node) {
  PPCOperandGenerator g(this);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                   g.UseUniqueRegister(node->InputAt(1)),
                                   g.UseUniqueRegister(node->InputAt(2)),
                                   g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {
        g.DefineAsRegister(node),
        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};

    Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    Emit(kPPC_Mul32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
         g.UseRegister(node->InputAt(2)));
  }
}

namespace {
// Shared routine for multiple shift operations.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  PPCOperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasResolvedValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace

void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kPPC_ShiftLeftPair, node);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kPPC_ShiftRightPair, node);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kPPC_ShiftRightAlgPair, node);
}
#endif

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Sar(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
      m.right().Is(32)) {
    // Just load and sign-extend the interesting 4 bytes instead. This happens,
    // for example, when we're loading and untagging SMIs.
    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
                                                AddressOption::kAllowAll);
    if (mleft.matches() && mleft.index() == nullptr) {
      int64_t offset = 0;
      Node* displacement = mleft.displacement();
      if (displacement != nullptr) {
        Int64Matcher mdisplacement(displacement);
        DCHECK(mdisplacement.HasResolvedValue());
        offset = mdisplacement.ResolvedValue();
      }
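      // SmiWordOffset presumably biases the displacement so the 4-byte load
      // hits the word holding the Smi payload (which word is endian-dependent).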
      offset = SmiWordOffset(offset);
      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
             g.DefineAsRegister(node), g.UseRegister(mleft.base()),
             g.TempImmediate(offset), g.UseImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }

// TODO(mbrandy): Absorb logical-and into rlwinm?
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb logical-and into rldic?
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Clz(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Clz(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
#endif

void InstructionSelector::VisitWord32Popcnt(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Popcnt(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif

void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  PPCOperandGenerator g(this);
  InstructionOperand temp[] = {g.TempRegister()};
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord64) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
                        node->opcode() == IrOpcode::kWord64AtomicLoad);
      Emit(kPPC_LoadByteRev64 | AddressingModeField::encode(kMode_MRR),
           g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
           g.UseImmediate(is_atomic));
      return;
    }
  }
  Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)), 1, temp);
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  PPCOperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
                        node->opcode() == IrOpcode::kWord64AtomicLoad);
      Emit(kPPC_LoadByteRev32 | AddressingModeField::encode(kMode_MRR),
           g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
           g.UseImmediate(is_atomic));
      return;
    }
  }
  Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_LoadReverseSimd128RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif

void InstructionSelector::VisitInt32Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}
#endif

namespace {

void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont);
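// Overflow check for a 32-bit multiply: compute the full product as separate
// low and high words, then compare the high word against the sign extension
// of the low word (low >> 31); the two differ exactly when the product does
// not fit in 32 bits.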
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand high32_operand = g.TempRegister();
  InstructionOperand temp_operand = g.TempRegister();
  {
    InstructionOperand outputs[] = {result_operand, high32_operand};
    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                   g.UseRegister(m.right().node())};
    selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
  }
  {
    InstructionOperand shift_31 = g.UseImmediate(31);
    InstructionOperand outputs[] = {temp_operand};
    InstructionOperand inputs[] = {result_operand, shift_31};
    selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
  }

  VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
}

}  // namespace

void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kPPC_Mul32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, kPPC_Mul64, node);
}
#endif

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kPPC_Div32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kPPC_Div64, node);
}
#endif

void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kPPC_DivU32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kPPC_DivU64, node);
}
#endif

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kPPC_Mod32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kPPC_Mod64, node);
}
#endif

void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kPPC_ModU32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kPPC_ModU64, node);
}
#endif

void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Float32ToDouble, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kPPC_Int32ToFloat32, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kPPC_Uint32ToFloat32, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int32ToDouble, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Uint32ToDouble, node);
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  DCHECK(SmiValuesAre31Bits());
  DCHECK(COMPRESS_POINTERS_BOOL);
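  // With pointer compression and 31-bit Smis the upper half of the register
  // is never inspected, so the bitcast degenerates to an identity move.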
  EmitIdentity(node);
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord32, node);
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord32, node);
}

bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_Uint32ToUint64, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}
#endif

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kPPC_DoubleToFloat32, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  PPCOperandGenerator g(this);

  InstructionCode opcode = kPPC_Float32ToInt32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  PPCOperandGenerator g(this);

  InstructionCode opcode = kPPC_Float32ToUint32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_Int64ToInt32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kPPC_Int64ToFloat32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kPPC_Uint64ToFloat32, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Uint64ToDouble, node);
}
#endif

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_BitcastDoubleToInt64, node);
}
#endif

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_BitcastInt64ToDouble, node);
}
#endif

void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  // TODO(mbrandy): detect multiply-add
  VisitRRR(this, kPPC_AddDouble, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  // TODO(mbrandy): detect multiply-subtract
  VisitRRR(this, kPPC_SubDouble, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  // TODO(mbrandy): detect negate
  VisitRRR(this, kPPC_MulDouble, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kPPC_DivDouble, node);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  PPCOperandGenerator g(this);
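  // There is no PPC instruction for fmod, so this is emitted as a call into
  // the C runtime with fixed FP argument/return registers d1 and d2.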
1395 Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
1396 g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
1397 ->MarkAsCall();
1398 }
1399
VisitFloat32Max(Node * node)1400 void InstructionSelector::VisitFloat32Max(Node* node) {
1401 VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node);
1402 }
1403
VisitFloat64Max(Node * node)1404 void InstructionSelector::VisitFloat64Max(Node* node) {
1405 VisitRRR(this, kPPC_MaxDouble, node);
1406 }
1407
VisitFloat64SilenceNaN(Node * node)1408 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1409 VisitRR(this, kPPC_Float64SilenceNaN, node);
1410 }
1411
VisitFloat32Min(Node * node)1412 void InstructionSelector::VisitFloat32Min(Node* node) {
1413 VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node);
1414 }
1415
VisitFloat64Min(Node * node)1416 void InstructionSelector::VisitFloat64Min(Node* node) {
1417 VisitRRR(this, kPPC_MinDouble, node);
1418 }
1419
VisitFloat32Abs(Node * node)1420 void InstructionSelector::VisitFloat32Abs(Node* node) {
1421 VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
1422 }
1423
VisitFloat64Abs(Node * node)1424 void InstructionSelector::VisitFloat64Abs(Node* node) {
1425 VisitRR(this, kPPC_AbsDouble, node);
1426 }
1427
VisitFloat32Sqrt(Node * node)1428 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1429 VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
1430 }
1431
VisitFloat64Ieee754Unop(Node * node,InstructionCode opcode)1432 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1433 InstructionCode opcode) {
1434 PPCOperandGenerator g(this);
1435 Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
1436 ->MarkAsCall();
1437 }
1438
VisitFloat64Ieee754Binop(Node * node,InstructionCode opcode)1439 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1440 InstructionCode opcode) {
1441 PPCOperandGenerator g(this);
1442 Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
1443 g.UseFixed(node->InputAt(1), d2))
1444 ->MarkAsCall();
1445 }
1446
VisitFloat64Sqrt(Node * node)1447 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1448 VisitRR(this, kPPC_SqrtDouble, node);
1449 }
1450
VisitFloat32RoundDown(Node * node)1451 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1452 VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
1453 }
1454
VisitFloat64RoundDown(Node * node)1455 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1456 VisitRR(this, kPPC_FloorDouble, node);
1457 }
1458
VisitFloat32RoundUp(Node * node)1459 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
1460 VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
1461 }
1462
VisitFloat64RoundUp(Node * node)1463 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
1464 VisitRR(this, kPPC_CeilDouble, node);
1465 }
1466
VisitFloat32RoundTruncate(Node * node)1467 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
1468 VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
1469 }
1470
VisitFloat64RoundTruncate(Node * node)1471 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1472 VisitRR(this, kPPC_TruncateDouble, node);
1473 }
1474
VisitFloat64RoundTiesAway(Node * node)1475 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1476 VisitRR(this, kPPC_RoundDouble, node);
1477 }
1478
VisitFloat32Neg(Node * node)1479 void InstructionSelector::VisitFloat32Neg(Node* node) {
1480 VisitRR(this, kPPC_NegDouble, node);
1481 }
1482
VisitFloat64Neg(Node * node)1483 void InstructionSelector::VisitFloat64Neg(Node* node) {
1484 VisitRR(this, kPPC_NegDouble, node);
1485 }
1486
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
                                         kInt16Imm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
                                &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                         kInt16Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                kInt16Imm_Negate, &cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub,
                                         kInt16Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
}
#endif

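// Unsigned conditions must be tested with a logical compare, which on PPC
// takes an unsigned 16-bit immediate (cmpli) rather than the signed one
// used by cmpi; CompareLogical drives that immediate-mode choice in the
// word compare helpers below.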
static bool CompareLogical(FlagsContinuation* cont) {
  switch (cont->condition()) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
      return true;
    default:
      return false;
  }
}

namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
}

#if V8_TARGET_ARCH_PPC64
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
}
#endif

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
               g.UseRegister(right), cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
               g.UseRegister(right), cont);
}

}  // namespace

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    this, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(this, node, cont);
#if V8_TARGET_ARCH_PPC64
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64,
                                                     kInt16Imm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub,
                                                     kInt16Imm_Negate, cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord32And:
        // TODO(mbandy): opportunity for rlwinm?
        return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
                                kInt16Imm_Unsigned);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt32Add:
      // case IrOpcode::kWord32Or:
      // case IrOpcode::kWord32Xor:
      // case IrOpcode::kWord32Sar:
      // case IrOpcode::kWord32Shl:
      // case IrOpcode::kWord32Shr:
      // case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kWord64And:
        // TODO(mbandy): opportunity for rldic?
        return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
                                kInt16Imm_Unsigned);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt64Add:
      // case IrOpcode::kWord64Or:
      // case IrOpcode::kWord64Xor:
      // case IrOpcode::kWord64Sar:
      // case IrOpcode::kWord64Shl:
      // case IrOpcode::kWord64Shr:
      // case IrOpcode::kWord64Ror:
#endif
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  PPCOperandGenerator g(this);
  VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
               cont);
}

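// Heuristic: prefer a jump table when its rough cost estimate (space plus
// three times the dispatch time) beats the same metric for a balanced
// compare-and-branch tree over the cases.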
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  PPCOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kPPC_Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}

void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
#endif

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

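// C calls poke their stack arguments into the outgoing argument area
// starting at kStackFrameExtraParamSlot (from the PPC frame constants);
// other calls push arguments instead, folding the pending stack decrement
// into the push.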
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  PPCOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
             call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      if (input.node == nullptr) continue;
      Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int stack_decrement = 0;
    for (PushParameter input : base::Reversed(*arguments)) {
      stack_decrement += kSystemPointerSize;
      // Skip any alignment holes in pushed nodes.
      if (input.node == nullptr) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
    }
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

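// When an InsertLowWord32 covers a matching InsertHighWord32 (or vice
// versa), both word halves are known, so the pair is fused into a single
// kPPC_DoubleConstruct that assembles the double directly.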
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
    return;
  }
  Emit(kPPC_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
         g.UseRegister(left));
    return;
  }
  Emit(kPPC_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitMemoryBarrier(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Sync, g.NoOutput());
}

void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  VisitLoadCommon(this, node, load_rep);
}

void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  VisitLoadCommon(this, node, load_rep);
}

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  VisitStoreCommon(this, node, store_params.store_representation(),
                   store_params.order());
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  VisitStoreCommon(this, node, store_params.store_representation(),
                   store_params.order());
}

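// The atomic RMW helpers below keep every operand in a unique register,
// presumably because the underlying larx/stcx. retry loop re-reads the
// operands after the output has been written, so none of them may alias.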
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode);
}

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kPPC_AtomicExchangeWord64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode);
}

void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  selector->Emit(code, output_count, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode);
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode;
  if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kPPC_AtomicCompareExchangeWord64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode);
}

void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
                                ArchOpcode int8_op, ArchOpcode uint8_op,
                                ArchOpcode int16_op, ArchOpcode uint16_op,
                                ArchOpcode int32_op, ArchOpcode uint32_op,
                                ArchOpcode int64_op, ArchOpcode uint64_op) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  MachineType type = AtomicOpType(node->op());

  ArchOpcode opcode;

  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32()) {
    opcode = int32_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Int64()) {
    opcode = int64_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[3];

  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  selector->Emit(code, output_count, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  // Unused
  UNREACHABLE();
}

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode uint32_op, ArchOpcode uint64_op) {
  // Unused
  UNREACHABLE();
}

#define VISIT_ATOMIC_BINOP(op) \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
    VisitAtomicBinaryOperation( \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
  } \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
    VisitAtomicBinaryOperation( \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

#define SIMD_TYPES(V) \
  V(F64x2) \
  V(F32x4) \
  V(I64x2) \
  V(I32x4) \
  V(I16x8) \
  V(I8x16)

#define SIMD_BINOP_LIST(V) \
  V(F64x2Add) \
  V(F64x2Sub) \
  V(F64x2Mul) \
  V(F64x2Eq) \
  V(F64x2Ne) \
  V(F64x2Le) \
  V(F64x2Lt) \
  V(F64x2Div) \
  V(F64x2Min) \
  V(F64x2Max) \
  V(F32x4Add) \
  V(F32x4Sub) \
  V(F32x4Mul) \
  V(F32x4Eq) \
  V(F32x4Ne) \
  V(F32x4Lt) \
  V(F32x4Le) \
  V(F32x4Div) \
  V(F32x4Min) \
  V(F32x4Max) \
  V(I64x2Add) \
  V(I64x2Sub) \
  V(I64x2Mul) \
  V(I64x2Eq) \
  V(I64x2Ne) \
  V(I64x2ExtMulLowI32x4S) \
  V(I64x2ExtMulHighI32x4S) \
  V(I64x2ExtMulLowI32x4U) \
  V(I64x2ExtMulHighI32x4U) \
  V(I64x2GtS) \
  V(I64x2GeS) \
  V(I32x4Add) \
  V(I32x4Sub) \
  V(I32x4Mul) \
  V(I32x4MinS) \
  V(I32x4MinU) \
  V(I32x4MaxS) \
  V(I32x4MaxU) \
  V(I32x4Eq) \
  V(I32x4Ne) \
  V(I32x4GtS) \
  V(I32x4GeS) \
  V(I32x4GtU) \
  V(I32x4GeU) \
  V(I32x4DotI16x8S) \
  V(I32x4ExtMulLowI16x8S) \
  V(I32x4ExtMulHighI16x8S) \
  V(I32x4ExtMulLowI16x8U) \
  V(I32x4ExtMulHighI16x8U) \
  V(I16x8Add) \
  V(I16x8Sub) \
  V(I16x8Mul) \
  V(I16x8MinS) \
  V(I16x8MinU) \
  V(I16x8MaxS) \
  V(I16x8MaxU) \
  V(I16x8Eq) \
  V(I16x8Ne) \
  V(I16x8GtS) \
  V(I16x8GeS) \
  V(I16x8GtU) \
  V(I16x8GeU) \
  V(I16x8SConvertI32x4) \
  V(I16x8UConvertI32x4) \
  V(I16x8AddSatS) \
  V(I16x8SubSatS) \
  V(I16x8AddSatU) \
  V(I16x8SubSatU) \
  V(I16x8RoundingAverageU) \
  V(I16x8Q15MulRSatS) \
  V(I16x8ExtMulLowI8x16S) \
  V(I16x8ExtMulHighI8x16S) \
  V(I16x8ExtMulLowI8x16U) \
  V(I16x8ExtMulHighI8x16U) \
  V(I8x16Add) \
  V(I8x16Sub) \
  V(I8x16MinS) \
  V(I8x16MinU) \
  V(I8x16MaxS) \
  V(I8x16MaxU) \
  V(I8x16Eq) \
  V(I8x16Ne) \
  V(I8x16GtS) \
  V(I8x16GeS) \
  V(I8x16GtU) \
  V(I8x16GeU) \
  V(I8x16SConvertI16x8) \
  V(I8x16UConvertI16x8) \
  V(I8x16AddSatS) \
  V(I8x16SubSatS) \
  V(I8x16AddSatU) \
  V(I8x16SubSatU) \
  V(I8x16RoundingAverageU) \
  V(I8x16Swizzle) \
  V(S128And) \
  V(S128Or) \
  V(S128Xor) \
  V(S128AndNot)

#define SIMD_UNOP_LIST(V) \
  V(F64x2Abs) \
  V(F64x2Neg) \
  V(F64x2Sqrt) \
  V(F64x2Ceil) \
  V(F64x2Floor) \
  V(F64x2Trunc) \
  V(F64x2ConvertLowI32x4S) \
  V(F64x2ConvertLowI32x4U) \
  V(F64x2PromoteLowF32x4) \
  V(F32x4Abs) \
  V(F32x4Neg) \
  V(F32x4RecipApprox) \
  V(F32x4RecipSqrtApprox) \
  V(F32x4Sqrt) \
  V(F32x4SConvertI32x4) \
  V(F32x4UConvertI32x4) \
  V(F32x4Ceil) \
  V(F32x4Floor) \
  V(F32x4Trunc) \
  V(F32x4DemoteF64x2Zero) \
  V(I64x2Abs) \
  V(I64x2Neg) \
  V(I64x2SConvertI32x4Low) \
  V(I64x2SConvertI32x4High) \
  V(I64x2UConvertI32x4Low) \
  V(I64x2UConvertI32x4High) \
  V(I32x4Neg) \
  V(I32x4Abs) \
  V(I32x4SConvertF32x4) \
  V(I32x4UConvertF32x4) \
  V(I32x4SConvertI16x8Low) \
  V(I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low) \
  V(I32x4UConvertI16x8High) \
  V(I32x4ExtAddPairwiseI16x8S) \
  V(I32x4ExtAddPairwiseI16x8U) \
  V(I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero) \
  V(I16x8Neg) \
  V(I16x8Abs) \
  V(I8x16Neg) \
  V(I8x16Abs) \
  V(I8x16Popcnt) \
  V(I16x8SConvertI8x16Low) \
  V(I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low) \
  V(I16x8UConvertI8x16High) \
  V(I16x8ExtAddPairwiseI8x16S) \
  V(I16x8ExtAddPairwiseI8x16U) \
  V(S128Not)

#define SIMD_SHIFT_LIST(V) \
  V(I64x2Shl) \
  V(I64x2ShrS) \
  V(I64x2ShrU) \
  V(I32x4Shl) \
  V(I32x4ShrS) \
  V(I32x4ShrU) \
  V(I16x8Shl) \
  V(I16x8ShrS) \
  V(I16x8ShrU) \
  V(I8x16Shl) \
  V(I8x16ShrS) \
  V(I8x16ShrU)

#define SIMD_BOOL_LIST(V) \
  V(V128AnyTrue) \
  V(I64x2AllTrue) \
  V(I32x4AllTrue) \
  V(I16x8AllTrue) \
  V(I8x16AllTrue)

#define SIMD_VISIT_SPLAT(Type) \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    PPCOperandGenerator g(this); \
    Emit(kPPC_##Type##Splat, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0))); \
  }
SIMD_TYPES(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    PPCOperandGenerator g(this); \
    int32_t lane = OpParameter<int32_t>(node->op()); \
    Emit(kPPC_##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type) \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    PPCOperandGenerator g(this); \
    int32_t lane = OpParameter<int32_t>(node->op()); \
    Emit(kPPC_##Type##ReplaceLane, g.DefineSameAsFirst(node), \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
         g.UseRegister(node->InputAt(1))); \
  }
SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

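// SIMD binops keep their inputs in unique registers and reserve two Simd128
// temps plus a GPR temp, presumably because several of these ops expand to
// multi-instruction sequences that need scratch registers.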
#define SIMD_VISIT_BINOP(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    InstructionOperand temps[] = {g.TempSimd128Register(), \
                                  g.TempSimd128Register(), g.TempRegister()}; \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
         g.UseUniqueRegister(node->InputAt(0)), \
         g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST

#define SIMD_VISIT_UNOP(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    InstructionOperand temps[] = {g.TempSimd128Register()}; \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), arraysize(temps), temps); \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST

#define SIMD_VISIT_SHIFT(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
         g.UseUniqueRegister(node->InputAt(0)), \
         g.UseUniqueRegister(node->InputAt(1))); \
  }
SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
#undef SIMD_VISIT_SHIFT
#undef SIMD_SHIFT_LIST

#define SIMD_VISIT_BOOL(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
         g.UseUniqueRegister(node->InputAt(0))); \
  }
SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_VISIT_BOOL
#undef SIMD_BOOL_LIST

#define SIMD_VISIT_QFMOP(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    Emit(kPPC_##Opcode, g.DefineSameAsFirst(node), \
         g.UseUniqueRegister(node->InputAt(0)), \
         g.UseUniqueRegister(node->InputAt(1)), \
         g.UseRegister(node->InputAt(2))); \
  }
SIMD_VISIT_QFMOP(F64x2Qfma)
SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP

#define SIMD_VISIT_BITMASK(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    PPCOperandGenerator g(this); \
    InstructionOperand temps[] = {g.TempRegister()}; \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
  }
SIMD_VISIT_BITMASK(I8x16BitMask)
SIMD_VISIT_BITMASK(I16x8BitMask)
SIMD_VISIT_BITMASK(I32x4BitMask)
SIMD_VISIT_BITMASK(I64x2BitMask)
#undef SIMD_VISIT_BITMASK

#define SIMD_VISIT_PMIN_MAX(Type) \
  void InstructionSelector::Visit##Type(Node* node) { \
    PPCOperandGenerator g(this); \
    Emit(kPPC_##Type, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
  }
SIMD_VISIT_PMIN_MAX(F64x2Pmin)
SIMD_VISIT_PMIN_MAX(F32x4Pmin)
SIMD_VISIT_PMIN_MAX(F64x2Pmax)
SIMD_VISIT_PMIN_MAX(F32x4Pmax)
#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES

#if V8_ENABLE_WEBASSEMBLY
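// Shuffle indices arrive in little-endian lane order and are remapped to
// IBM (big-endian) lane numbering: index 0 becomes 15 within the first
// input, and index 16 becomes 31 within the second.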
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  PPCOperandGenerator g(this);
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  // Remap the shuffle indices to match IBM lane numbering.
  int max_index = 15;
  int total_lane_count = 2 * kSimd128Size;
  uint8_t shuffle_remapped[kSimd128Size];
  for (int i = 0; i < kSimd128Size; i++) {
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
  }
  Emit(kPPC_I8x16Shuffle, g.DefineAsRegister(node), g.UseUniqueRegister(input0),
       g.UseUniqueRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
#else
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY

void InstructionSelector::VisitS128Zero(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_S128Zero, g.DefineAsRegister(node));
}

void InstructionSelector::VisitS128Select(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_S128Select, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}

// This is a replica of SimdShuffle::Pack4Lanes. That function is not
// available on builds with WebAssembly disabled, so it is duplicated here
// because visitors such as S128Const need it.
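// For example, bytes {1, 2, 3, 4} pack to the immediate 0x04030201.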
static int32_t Pack4Lanes(const uint8_t* shuffle) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i];
  }
  return result;
}

void InstructionSelector::VisitS128Const(Node* node) {
  PPCOperandGenerator g(this);
  uint32_t val[kSimd128Size / sizeof(uint32_t)];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kPPC_S128Zero, dst);
  } else if (all_ones) {
    Emit(kPPC_S128AllOnes, dst);
  } else {
    // We have to use Pack4Lanes to reverse the bytes (lanes) on BE; on LE
    // this is effectively a no-op.
    Emit(kPPC_S128Const, g.DefineAsRegister(node),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]))),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 4)),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 8)),
         g.UseImmediate(Pack4Lanes(bit_cast<uint8_t*>(&val[0]) + 12)));
  }
}

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  PPCOperandGenerator g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kPPC_Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}

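// A lane load replaces one lane of an existing vector, so the S128 value
// (input 2) is passed as the first instruction input and the output is
// defined same-as-first to reuse its register.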
void InstructionSelector::VisitLoadLane(Node* node) {
  LoadLaneParameters params = LoadLaneParametersOf(node->op());
  InstructionCode opcode = kArchNop;
  if (params.rep == MachineType::Int8()) {
    opcode = kPPC_S128Load8Lane;
  } else if (params.rep == MachineType::Int16()) {
    opcode = kPPC_S128Load16Lane;
  } else if (params.rep == MachineType::Int32()) {
    opcode = kPPC_S128Load32Lane;
  } else if (params.rep == MachineType::Int64()) {
    opcode = kPPC_S128Load64Lane;
  } else {
    UNREACHABLE();
  }

  PPCOperandGenerator g(this);
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseImmediate(params.laneidx));
}

void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (params.transformation) {
    case LoadTransformation::kS128Load8Splat:
      opcode = kPPC_S128Load8Splat;
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kPPC_S128Load16Splat;
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kPPC_S128Load32Splat;
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kPPC_S128Load64Splat;
      break;
    case LoadTransformation::kS128Load8x8S:
      opcode = kPPC_S128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kPPC_S128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kPPC_S128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kPPC_S128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kPPC_S128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kPPC_S128Load32x2U;
      break;
    case LoadTransformation::kS128Load32Zero:
      opcode = kPPC_S128Load32Zero;
      break;
    case LoadTransformation::kS128Load64Zero:
      opcode = kPPC_S128Load64Zero;
      break;
    default:
      UNREACHABLE();
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}

void InstructionSelector::VisitStoreLane(Node* node) {
  PPCOperandGenerator g(this);

  StoreLaneParameters params = StoreLaneParametersOf(node->op());
  InstructionCode opcode = kArchNop;
  if (params.rep == MachineRepresentation::kWord8) {
    opcode = kPPC_S128Store8Lane;
  } else if (params.rep == MachineRepresentation::kWord16) {
    opcode = kPPC_S128Store16Lane;
  } else if (params.rep == MachineRepresentation::kWord32) {
    opcode = kPPC_S128Store32Lane;
  } else if (params.rep == MachineRepresentation::kWord64) {
    opcode = kPPC_S128Store64Lane;
  } else {
    UNREACHABLE();
  }

  InstructionOperand inputs[4];
  InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
  inputs[0] = value_operand;
  inputs[1] = g.UseRegister(node->InputAt(0));
  inputs[2] = g.UseRegister(node->InputAt(1));
  inputs[3] = g.UseImmediate(params.laneidx);
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
}

void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitF64x2NearestInt(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitF32x4NearestInt(Node* node) { UNREACHABLE(); }

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt;
  // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8