1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <algorithm>
6
7 #include "src/base/adapters.h"
8 #include "src/compiler/instruction-selector-impl.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties.h"
11 #include "src/turbo-assembler.h"
12
13 namespace v8 {
14 namespace internal {
15 namespace compiler {
16
17 // Adds X64-specific methods for generating operands.
18 class X64OperandGenerator final : public OperandGenerator {
19 public:
20   explicit X64OperandGenerator(InstructionSelector* selector)
21 : OperandGenerator(selector) {}
22
23   bool CanBeImmediate(Node* node) {
24 switch (node->opcode()) {
25 case IrOpcode::kInt32Constant:
26 case IrOpcode::kRelocatableInt32Constant:
27 return true;
28 case IrOpcode::kInt64Constant: {
29 const int64_t value = OpParameter<int64_t>(node->op());
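        // Note: the strict lower bound rejects INT32_MIN, presumably so that
        // the constant can also be used as a negated immediate without
        // overflowing int32.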
30 return std::numeric_limits<int32_t>::min() < value &&
31 value <= std::numeric_limits<int32_t>::max();
32 }
33 case IrOpcode::kNumberConstant: {
34 const double value = OpParameter<double>(node->op());
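        // Only +0.0 (all 64 bits zero) can be used as an immediate; -0.0 and
        // all other doubles cannot.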
35 return bit_cast<int64_t>(value) == 0;
36 }
37 default:
38 return false;
39 }
40 }
41
42   int32_t GetImmediateIntegerValue(Node* node) {
43 DCHECK(CanBeImmediate(node));
44 if (node->opcode() == IrOpcode::kInt32Constant) {
45 return OpParameter<int32_t>(node->op());
46 }
47 DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
48 return static_cast<int32_t>(OpParameter<int64_t>(node->op()));
49 }
50
51   bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
52 int effect_level) {
53 if (input->opcode() != IrOpcode::kLoad ||
54 !selector()->CanCover(node, input)) {
55 return false;
56 }
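    // The load may only be folded if no effectful operation has been
    // scheduled between it and this use.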
57 if (effect_level != selector()->GetEffectLevel(input)) {
58 return false;
59 }
60 MachineRepresentation rep =
61 LoadRepresentationOf(input->op()).representation();
62 switch (opcode) {
63 case kX64And:
64 case kX64Or:
65 case kX64Xor:
66 case kX64Add:
67 case kX64Sub:
68 case kX64Push:
69 case kX64Cmp:
70 case kX64Test:
71 return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
72 case kX64And32:
73 case kX64Or32:
74 case kX64Xor32:
75 case kX64Add32:
76 case kX64Sub32:
77 case kX64Cmp32:
78 case kX64Test32:
79 return rep == MachineRepresentation::kWord32;
80 case kX64Cmp16:
81 case kX64Test16:
82 return rep == MachineRepresentation::kWord16;
83 case kX64Cmp8:
84 case kX64Test8:
85 return rep == MachineRepresentation::kWord8;
86 default:
87 break;
88 }
89 return false;
90 }
91
92   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
93 Node* base, Node* displacement,
94 DisplacementMode displacement_mode,
95 InstructionOperand inputs[],
96 size_t* input_count) {
97 AddressingMode mode = kMode_MRI;
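    // A constant-zero base contributes nothing to the address; drop it so a
    // cheaper addressing mode can be chosen below.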
98 if (base != nullptr && (index != nullptr || displacement != nullptr)) {
99 if (base->opcode() == IrOpcode::kInt32Constant &&
100 OpParameter<int32_t>(base->op()) == 0) {
101 base = nullptr;
102 } else if (base->opcode() == IrOpcode::kInt64Constant &&
103 OpParameter<int64_t>(base->op()) == 0) {
104 base = nullptr;
105 }
106 }
107 if (base != nullptr) {
108 inputs[(*input_count)++] = UseRegister(base);
109 if (index != nullptr) {
110 DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
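        // scale_exponent selects the SIB scale factor: 0..3 map to *1, *2,
        // *4 and *8.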
111 inputs[(*input_count)++] = UseRegister(index);
112 if (displacement != nullptr) {
113 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
114 ? UseNegatedImmediate(displacement)
115 : UseImmediate(displacement);
116 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
117 kMode_MR4I, kMode_MR8I};
118 mode = kMRnI_modes[scale_exponent];
119 } else {
120 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
121 kMode_MR4, kMode_MR8};
122 mode = kMRn_modes[scale_exponent];
123 }
124 } else {
125 if (displacement == nullptr) {
126 mode = kMode_MR;
127 } else {
128 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
129 ? UseNegatedImmediate(displacement)
130 : UseImmediate(displacement);
131 mode = kMode_MRI;
132 }
133 }
134 } else {
135 DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
136 if (displacement != nullptr) {
137 if (index == nullptr) {
138 inputs[(*input_count)++] = UseRegister(displacement);
139 mode = kMode_MR;
140 } else {
141 inputs[(*input_count)++] = UseRegister(index);
142 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
143 ? UseNegatedImmediate(displacement)
144 : UseImmediate(displacement);
145 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
146 kMode_M4I, kMode_M8I};
147 mode = kMnI_modes[scale_exponent];
148 }
149 } else {
150 inputs[(*input_count)++] = UseRegister(index);
151 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
152 kMode_M4, kMode_M8};
153 mode = kMn_modes[scale_exponent];
154 if (mode == kMode_MR1) {
155 // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
156 inputs[(*input_count)++] = UseRegister(index);
157 }
158 }
159 }
160 return mode;
161 }
162
163   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
164 InstructionOperand inputs[],
165 size_t* input_count) {
166 if (selector()->CanAddressRelativeToRootsRegister()) {
167 LoadMatcher<ExternalReferenceMatcher> m(operand);
168 if (m.index().HasValue() && m.object().HasValue()) {
169 ptrdiff_t const delta =
170 m.index().Value() +
171 TurboAssemblerBase::RootRegisterOffsetForExternalReference(
172 selector()->isolate(), m.object().Value());
173 if (is_int32(delta)) {
174 inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
175 return kMode_Root;
176 }
177 }
178 }
179 BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
180 DCHECK(m.matches());
181 if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
182 return GenerateMemoryOperandInputs(
183 m.index(), m.scale(), m.base(), m.displacement(),
184 m.displacement_mode(), inputs, input_count);
185 } else if (m.base() == nullptr &&
186 m.displacement_mode() == kPositiveDisplacement) {
187 // The displacement cannot be an immediate, but we can use the
188 // displacement as base instead and still benefit from addressing
189 // modes for the scale.
190 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
191 nullptr, m.displacement_mode(), inputs,
192 input_count);
193 } else {
194 inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
195 inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
196 return kMode_MR1;
197 }
198 }
199
200   InstructionOperand GetEffectiveIndexOperand(Node* index,
201 AddressingMode* mode) {
202 if (CanBeImmediate(index)) {
203 *mode = kMode_MRI;
204 return UseImmediate(index);
205 } else {
206 *mode = kMode_MR1;
207 return UseUniqueRegister(index);
208 }
209 }
210
211   bool CanBeBetterLeftOperand(Node* node) const {
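    // A node that is not live afterwards may be clobbered, which makes it the
    // better choice for the destructive left operand of a two-address
    // instruction.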
212 return !selector()->IsLive(node);
213 }
214 };
215
216 namespace {
217 ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
218 ArchOpcode opcode = kArchNop;
219 switch (load_rep.representation()) {
220 case MachineRepresentation::kFloat32:
221 opcode = kX64Movss;
222 break;
223 case MachineRepresentation::kFloat64:
224 opcode = kX64Movsd;
225 break;
226 case MachineRepresentation::kBit: // Fall through.
227 case MachineRepresentation::kWord8:
228 opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
229 break;
230 case MachineRepresentation::kWord16:
231 opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
232 break;
233 case MachineRepresentation::kWord32:
234 opcode = kX64Movl;
235 break;
236 case MachineRepresentation::kTaggedSigned: // Fall through.
237 case MachineRepresentation::kTaggedPointer: // Fall through.
238 case MachineRepresentation::kTagged: // Fall through.
239 case MachineRepresentation::kWord64:
240 opcode = kX64Movq;
241 break;
242     case MachineRepresentation::kSimd128:
243 opcode = kX64Movdqu;
244 break;
245 case MachineRepresentation::kNone:
246 UNREACHABLE();
247 break;
248 }
249 return opcode;
250 }
251
252 ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
253 switch (store_rep.representation()) {
254 case MachineRepresentation::kFloat32:
255 return kX64Movss;
256 break;
257 case MachineRepresentation::kFloat64:
258 return kX64Movsd;
259 break;
260 case MachineRepresentation::kBit: // Fall through.
261 case MachineRepresentation::kWord8:
262 return kX64Movb;
263 break;
264 case MachineRepresentation::kWord16:
265 return kX64Movw;
266 break;
267 case MachineRepresentation::kWord32:
268 return kX64Movl;
269 break;
270 case MachineRepresentation::kTaggedSigned: // Fall through.
271 case MachineRepresentation::kTaggedPointer: // Fall through.
272 case MachineRepresentation::kTagged: // Fall through.
273 case MachineRepresentation::kWord64:
274 return kX64Movq;
275 break;
276     case MachineRepresentation::kSimd128:
277 return kX64Movdqu;
278 break;
279 case MachineRepresentation::kNone:
280 UNREACHABLE();
281 }
282 UNREACHABLE();
283 }
284
285 } // namespace
286
287 void InstructionSelector::VisitStackSlot(Node* node) {
288 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
289 int slot = frame_->AllocateSpillSlot(rep.size());
290 OperandGenerator g(this);
291
292 Emit(kArchStackSlot, g.DefineAsRegister(node),
293 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
294 }
295
296 void InstructionSelector::VisitDebugAbort(Node* node) {
297 X64OperandGenerator g(this);
298 Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
299 }
300
301 void InstructionSelector::VisitSpeculationFence(Node* node) {
302 X64OperandGenerator g(this);
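  // The speculation fence is implemented as lfence, which acts as a barrier
  // against speculative execution of later instructions.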
303 Emit(kLFence, g.NoOutput());
304 }
305
306 void InstructionSelector::VisitLoad(Node* node) {
307 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
308 X64OperandGenerator g(this);
309
310 ArchOpcode opcode = GetLoadOpcode(load_rep);
311 InstructionOperand outputs[1];
312 outputs[0] = g.DefineAsRegister(node);
313 InstructionOperand inputs[3];
314 size_t input_count = 0;
315 AddressingMode mode =
316 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
317 InstructionCode code = opcode | AddressingModeField::encode(mode);
318 if (node->opcode() == IrOpcode::kProtectedLoad) {
319 code |= MiscField::encode(kMemoryAccessProtected);
320 } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
321 CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
322 code |= MiscField::encode(kMemoryAccessPoisoned);
323 }
324 Emit(code, 1, outputs, input_count, inputs);
325 }
326
327 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
328
329 void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
330
331 void InstructionSelector::VisitStore(Node* node) {
332 X64OperandGenerator g(this);
333 Node* base = node->InputAt(0);
334 Node* index = node->InputAt(1);
335 Node* value = node->InputAt(2);
336
337 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
338 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
339
340 if (write_barrier_kind != kNoWriteBarrier) {
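    // Stores that need a write barrier go through kArchStoreWithWriteBarrier;
    // the operands are held in unique registers so they stay valid across the
    // out-of-line barrier code.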
341 DCHECK(CanBeTaggedPointer(store_rep.representation()));
342 AddressingMode addressing_mode;
343 InstructionOperand inputs[] = {
344 g.UseUniqueRegister(base),
345 g.GetEffectiveIndexOperand(index, &addressing_mode),
346 g.UseUniqueRegister(value)};
347 RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
348 switch (write_barrier_kind) {
349 case kNoWriteBarrier:
350 UNREACHABLE();
351 break;
352 case kMapWriteBarrier:
353 record_write_mode = RecordWriteMode::kValueIsMap;
354 break;
355 case kPointerWriteBarrier:
356 record_write_mode = RecordWriteMode::kValueIsPointer;
357 break;
358 case kFullWriteBarrier:
359 record_write_mode = RecordWriteMode::kValueIsAny;
360 break;
361 }
362 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
363 InstructionCode code = kArchStoreWithWriteBarrier;
364 code |= AddressingModeField::encode(addressing_mode);
365 code |= MiscField::encode(static_cast<int>(record_write_mode));
366 Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
367 } else {
368 ArchOpcode opcode = GetStoreOpcode(store_rep);
369 InstructionOperand inputs[4];
370 size_t input_count = 0;
371 AddressingMode addressing_mode =
372 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
373 InstructionCode code =
374 opcode | AddressingModeField::encode(addressing_mode);
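    // A store of 32 bits or narrower only writes the low bits of the value,
    // so an explicit Int64->Int32 truncation of the stored value is redundant.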
375 if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
376 (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
377 CanCover(node, value)) {
378 value = value->InputAt(0);
379 }
380 InstructionOperand value_operand =
381 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
382 inputs[input_count++] = value_operand;
383 Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
384 inputs);
385 }
386 }
387
388 void InstructionSelector::VisitProtectedStore(Node* node) {
389 X64OperandGenerator g(this);
390 Node* value = node->InputAt(2);
391
392 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
393
394 ArchOpcode opcode = GetStoreOpcode(store_rep);
395 InstructionOperand inputs[4];
396 size_t input_count = 0;
397 AddressingMode addressing_mode =
398 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
399 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
400 MiscField::encode(kMemoryAccessProtected);
401 InstructionOperand value_operand =
402 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
403 inputs[input_count++] = value_operand;
404 Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
405 }
406
407 // Architecture supports unaligned access, therefore VisitLoad is used instead
408 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
409
410 // Architecture supports unaligned access, therefore VisitStore is used instead
411 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
412
413 // Shared routine for multiple binary operations.
414 static void VisitBinop(InstructionSelector* selector, Node* node,
415 InstructionCode opcode, FlagsContinuation* cont) {
416 X64OperandGenerator g(selector);
417 Int32BinopMatcher m(node);
418 Node* left = m.left().node();
419 Node* right = m.right().node();
420 InstructionOperand inputs[8];
421 size_t input_count = 0;
422 InstructionOperand outputs[1];
423 size_t output_count = 0;
424
425 // TODO(turbofan): match complex addressing modes.
426 if (left == right) {
427 // If both inputs refer to the same operand, enforce allocating a register
428 // for both of them to ensure that we don't end up generating code like
429 // this:
430 //
431 // mov rax, [rbp-0x10]
432 // add rax, [rbp-0x10]
433 // jo label
434 InstructionOperand const input = g.UseRegister(left);
435 inputs[input_count++] = input;
436 inputs[input_count++] = input;
437 } else if (g.CanBeImmediate(right)) {
438 inputs[input_count++] = g.UseRegister(left);
439 inputs[input_count++] = g.UseImmediate(right);
440 } else {
441 int effect_level = selector->GetEffectLevel(node);
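    // When the result feeds a branch, the instruction is emitted at the
    // branch, so use the effect level of that branch instead.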
442 if (cont->IsBranch()) {
443 effect_level = selector->GetEffectLevel(
444 cont->true_block()->PredecessorAt(0)->control_input());
445 }
446 if (node->op()->HasProperty(Operator::kCommutative) &&
447 g.CanBeBetterLeftOperand(right) &&
448 (!g.CanBeBetterLeftOperand(left) ||
449 !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
450 std::swap(left, right);
451 }
452 if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
453 inputs[input_count++] = g.UseRegister(left);
454 AddressingMode addressing_mode =
455 g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
456 opcode |= AddressingModeField::encode(addressing_mode);
457 } else {
458 inputs[input_count++] = g.UseRegister(left);
459 inputs[input_count++] = g.Use(right);
460 }
461 }
462
463 if (cont->IsBranch()) {
464 inputs[input_count++] = g.Label(cont->true_block());
465 inputs[input_count++] = g.Label(cont->false_block());
466 }
467
468 outputs[output_count++] = g.DefineSameAsFirst(node);
469
470 DCHECK_NE(0u, input_count);
471 DCHECK_EQ(1u, output_count);
472 DCHECK_GE(arraysize(inputs), input_count);
473 DCHECK_GE(arraysize(outputs), output_count);
474
475 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
476 inputs, cont);
477 }
478
479
480 // Shared routine for multiple binary operations.
481 static void VisitBinop(InstructionSelector* selector, Node* node,
482 InstructionCode opcode) {
483 FlagsContinuation cont;
484 VisitBinop(selector, node, opcode, &cont);
485 }
486
487
488 void InstructionSelector::VisitWord32And(Node* node) {
489 X64OperandGenerator g(this);
490 Uint32BinopMatcher m(node);
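  // "x & 0xFF" and "x & 0xFFFF" are just zero-extensions of the low byte or
  // word, so emit a movzx instead of an andl.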
491 if (m.right().Is(0xFF)) {
492 Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
493 } else if (m.right().Is(0xFFFF)) {
494 Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
495 } else {
496 VisitBinop(this, node, kX64And32);
497 }
498 }
499
500
501 void InstructionSelector::VisitWord64And(Node* node) {
502 VisitBinop(this, node, kX64And);
503 }
504
505 void InstructionSelector::VisitWord32Or(Node* node) {
506 VisitBinop(this, node, kX64Or32);
507 }
508
509
510 void InstructionSelector::VisitWord64Or(Node* node) {
511 VisitBinop(this, node, kX64Or);
512 }
513
514
515 void InstructionSelector::VisitWord32Xor(Node* node) {
516 X64OperandGenerator g(this);
517 Uint32BinopMatcher m(node);
518 if (m.right().Is(-1)) {
519 Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
520 } else {
521 VisitBinop(this, node, kX64Xor32);
522 }
523 }
524
525
526 void InstructionSelector::VisitWord64Xor(Node* node) {
527 X64OperandGenerator g(this);
528 Uint64BinopMatcher m(node);
529 if (m.right().Is(-1)) {
530 Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
531 } else {
532 VisitBinop(this, node, kX64Xor);
533 }
534 }
535
536
537 namespace {
538
539 // Shared routine for multiple 32-bit shift operations.
540 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
541 void VisitWord32Shift(InstructionSelector* selector, Node* node,
542 ArchOpcode opcode) {
543 X64OperandGenerator g(selector);
544 Int32BinopMatcher m(node);
545 Node* left = m.left().node();
546 Node* right = m.right().node();
547
548 if (g.CanBeImmediate(right)) {
549 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
550 g.UseImmediate(right));
551 } else {
552 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
553 g.UseFixed(right, rcx));
554 }
555 }
556
557
558 // Shared routine for multiple 64-bit shift operations.
559 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
560 void VisitWord64Shift(InstructionSelector* selector, Node* node,
561 ArchOpcode opcode) {
562 X64OperandGenerator g(selector);
563 Int64BinopMatcher m(node);
564 Node* left = m.left().node();
565 Node* right = m.right().node();
566
567 if (g.CanBeImmediate(right)) {
568 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
569 g.UseImmediate(right));
570 } else {
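    // The hardware masks a 64-bit shift count to 6 bits, so an explicit
    // "& 0x3F" on the count is redundant and can be dropped.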
571 if (m.right().IsWord64And()) {
572 Int64BinopMatcher mright(right);
573 if (mright.right().Is(0x3F)) {
574 right = mright.left().node();
575 }
576 }
577 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
578 g.UseFixed(right, rcx));
579 }
580 }
581
582 // Shared routine for multiple shift operations with continuation.
583 template <typename BinopMatcher, int Bits>
584 bool TryVisitWordShift(InstructionSelector* selector, Node* node,
585 ArchOpcode opcode, FlagsContinuation* cont) {
586 X64OperandGenerator g(selector);
587 BinopMatcher m(node);
588 Node* left = m.left().node();
589 Node* right = m.right().node();
590
591 // If the shift count is 0, the flags are not affected.
592 if (!g.CanBeImmediate(right) ||
593 (g.GetImmediateIntegerValue(right) & (Bits - 1)) == 0) {
594 return false;
595 }
596 InstructionOperand output = g.DefineSameAsFirst(node);
597 InstructionOperand inputs[2];
598 inputs[0] = g.UseRegister(left);
599 inputs[1] = g.UseImmediate(right);
600 selector->EmitWithContinuation(opcode, 1, &output, 2, inputs, cont);
601 return true;
602 }
603
604 void EmitLea(InstructionSelector* selector, InstructionCode opcode,
605 Node* result, Node* index, int scale, Node* base,
606 Node* displacement, DisplacementMode displacement_mode) {
607 X64OperandGenerator g(selector);
608
609 InstructionOperand inputs[4];
610 size_t input_count = 0;
611 AddressingMode mode =
612 g.GenerateMemoryOperandInputs(index, scale, base, displacement,
613 displacement_mode, inputs, &input_count);
614
615 DCHECK_NE(0u, input_count);
616 DCHECK_GE(arraysize(inputs), input_count);
617
618 InstructionOperand outputs[1];
619 outputs[0] = g.DefineAsRegister(result);
620
621 opcode = AddressingModeField::encode(mode) | opcode;
622
623 selector->Emit(opcode, 1, outputs, input_count, inputs);
624 }
625
626 } // namespace
627
628
629 void InstructionSelector::VisitWord32Shl(Node* node) {
630 Int32ScaleMatcher m(node, true);
631 if (m.matches()) {
632 Node* index = node->InputAt(0);
633 Node* base = m.power_of_two_plus_one() ? index : nullptr;
634 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
635 kPositiveDisplacement);
636 return;
637 }
638 VisitWord32Shift(this, node, kX64Shl32);
639 }
640
641
642 void InstructionSelector::VisitWord64Shl(Node* node) {
643 X64OperandGenerator g(this);
644 Int64ScaleMatcher m(node, true);
645 if (m.matches()) {
646 Node* index = node->InputAt(0);
647 Node* base = m.power_of_two_plus_one() ? index : nullptr;
648 EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
649 kPositiveDisplacement);
650 return;
651 } else {
652 Int64BinopMatcher m(node);
653 if ((m.left().IsChangeInt32ToInt64() ||
654 m.left().IsChangeUint32ToUint64()) &&
655 m.right().IsInRange(32, 63)) {
656 // There's no need to sign/zero-extend to 64-bit if we shift out the upper
657 // 32 bits anyway.
658 Emit(kX64Shl, g.DefineSameAsFirst(node),
659 g.UseRegister(m.left().node()->InputAt(0)),
660 g.UseImmediate(m.right().node()));
661 return;
662 }
663 }
664 VisitWord64Shift(this, node, kX64Shl);
665 }
666
667
668 void InstructionSelector::VisitWord32Shr(Node* node) {
669 VisitWord32Shift(this, node, kX64Shr32);
670 }
671
672 namespace {
673 bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
674 InstructionCode opcode) {
675 DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
676 IrOpcode::kWord64Shr == node->opcode());
677 X64OperandGenerator g(selector);
678 Int64BinopMatcher m(node);
679 if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
680 m.right().Is(32)) {
681     // Just load and sign- or zero-extend the interesting 4 bytes instead.
682     // This happens, for example, when we're loading and untagging SMIs.
683 BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
684 AddressOption::kAllowAll);
685 if (mleft.matches() && (mleft.displacement() == nullptr ||
686 g.CanBeImmediate(mleft.displacement()))) {
687 size_t input_count = 0;
688 InstructionOperand inputs[3];
689 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
690 m.left().node(), inputs, &input_count);
691 if (mleft.displacement() == nullptr) {
692 // Make sure that the addressing mode indicates the presence of an
693 // immediate displacement. It seems that we never use M1 and M2, but we
694         // handle them here anyway.
695 switch (mode) {
696 case kMode_MR:
697 mode = kMode_MRI;
698 break;
699 case kMode_MR1:
700 mode = kMode_MR1I;
701 break;
702 case kMode_MR2:
703 mode = kMode_MR2I;
704 break;
705 case kMode_MR4:
706 mode = kMode_MR4I;
707 break;
708 case kMode_MR8:
709 mode = kMode_MR8I;
710 break;
711 case kMode_M1:
712 mode = kMode_M1I;
713 break;
714 case kMode_M2:
715 mode = kMode_M2I;
716 break;
717 case kMode_M4:
718 mode = kMode_M4I;
719 break;
720 case kMode_M8:
721 mode = kMode_M8I;
722 break;
723 case kMode_None:
724 case kMode_MRI:
725 case kMode_MR1I:
726 case kMode_MR2I:
727 case kMode_MR4I:
728 case kMode_MR8I:
729 case kMode_M1I:
730 case kMode_M2I:
731 case kMode_M4I:
732 case kMode_M8I:
733 case kMode_Root:
734 UNREACHABLE();
735 }
736 inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
737 } else {
738 // In the case that the base address was zero, the displacement will be
739 // in a register and replacing it with an immediate is not allowed. This
740 // usually only happens in dead code anyway.
741 if (!inputs[input_count - 1].IsImmediate()) return false;
742 int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
743 inputs[input_count - 1] =
744 ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
745 }
746 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
747 InstructionCode code = opcode | AddressingModeField::encode(mode);
748 selector->Emit(code, 1, outputs, input_count, inputs);
749 return true;
750 }
751 }
752 return false;
753 }
754 } // namespace
755
756 void InstructionSelector::VisitWord64Shr(Node* node) {
757 if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
758 VisitWord64Shift(this, node, kX64Shr);
759 }
760
761 void InstructionSelector::VisitWord32Sar(Node* node) {
762 X64OperandGenerator g(this);
763 Int32BinopMatcher m(node);
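  // Recognize "(x << 16) >> 16" and "(x << 24) >> 24" as 16- and 8-bit sign
  // extensions and emit a movsx directly.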
764 if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
765 Int32BinopMatcher mleft(m.left().node());
766 if (mleft.right().Is(16) && m.right().Is(16)) {
767 Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
768 return;
769 } else if (mleft.right().Is(24) && m.right().Is(24)) {
770 Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
771 return;
772 }
773 }
774 VisitWord32Shift(this, node, kX64Sar32);
775 }
776
777 void InstructionSelector::VisitWord64Sar(Node* node) {
778 if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
779 VisitWord64Shift(this, node, kX64Sar);
780 }
781
782
783 void InstructionSelector::VisitWord32Ror(Node* node) {
784 VisitWord32Shift(this, node, kX64Ror32);
785 }
786
787
788 void InstructionSelector::VisitWord64Ror(Node* node) {
789 VisitWord64Shift(this, node, kX64Ror);
790 }
791
792 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
793
794
795 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
796
797 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
798 X64OperandGenerator g(this);
799 Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
800 }
801
802 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
803 X64OperandGenerator g(this);
804 Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
805 }
806
807 void InstructionSelector::VisitInt32Add(Node* node) {
808 X64OperandGenerator g(this);
809
810 // Try to match the Add to a leal pattern
811 BaseWithIndexAndDisplacement32Matcher m(node);
812 if (m.matches() &&
813 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
814 EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
815 m.displacement(), m.displacement_mode());
816 return;
817 }
818
819 // No leal pattern match, use addl
820 VisitBinop(this, node, kX64Add32);
821 }
822
823
824 void InstructionSelector::VisitInt64Add(Node* node) {
825 X64OperandGenerator g(this);
826
827 // Try to match the Add to a leaq pattern
828 BaseWithIndexAndDisplacement64Matcher m(node);
829 if (m.matches() &&
830 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
831 EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
832 m.displacement(), m.displacement_mode());
833 return;
834 }
835
836   // No leaq pattern match, use addq
837 VisitBinop(this, node, kX64Add);
838 }
839
840
841 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
842 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
843 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
844 return VisitBinop(this, node, kX64Add, &cont);
845 }
846 FlagsContinuation cont;
847 VisitBinop(this, node, kX64Add, &cont);
848 }
849
850 void InstructionSelector::VisitInt32Sub(Node* node) {
851 X64OperandGenerator g(this);
852 DCHECK_EQ(node->InputCount(), 2);
853 Node* input1 = node->InputAt(0);
854 Node* input2 = node->InputAt(1);
855 if (input1->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
856 g.CanBeImmediate(input2)) {
857 int32_t imm = g.GetImmediateIntegerValue(input2);
858 InstructionOperand int64_input = g.UseRegister(input1->InputAt(0));
859 if (imm == 0) {
860 // Emit "movl" for subtraction of 0.
861 Emit(kX64Movl, g.DefineAsRegister(node), int64_input);
862 } else {
863 // Omit truncation and turn subtractions of constant values into immediate
864 // "leal" instructions by negating the value.
865 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
866 g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
867 }
868 return;
869 }
870
871 Int32BinopMatcher m(node);
872 if (m.left().Is(0)) {
873 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
874 } else if (m.right().Is(0)) {
875 // TODO(jarin): We should be able to use {EmitIdentity} here
876 // (https://crbug.com/v8/7947).
877 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
878 } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
879 // Turn subtractions of constant values into immediate "leal" instructions
880 // by negating the value.
881 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
882 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
883 g.TempImmediate(-m.right().Value()));
884 } else {
885 VisitBinop(this, node, kX64Sub32);
886 }
887 }
888
889 void InstructionSelector::VisitInt64Sub(Node* node) {
890 X64OperandGenerator g(this);
891 Int64BinopMatcher m(node);
892 if (m.left().Is(0)) {
893 Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
894 } else {
895 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
896 // Turn subtractions of constant values into immediate "leaq" instructions
897 // by negating the value.
898 Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
899 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
900 g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
901 return;
902 }
903 VisitBinop(this, node, kX64Sub);
904 }
905 }
906
907
908 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
909 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
910 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
911 return VisitBinop(this, node, kX64Sub, &cont);
912 }
913 FlagsContinuation cont;
914 VisitBinop(this, node, kX64Sub, &cont);
915 }
916
917
918 namespace {
919
920 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
921 X64OperandGenerator g(selector);
922 Int32BinopMatcher m(node);
923 Node* left = m.left().node();
924 Node* right = m.right().node();
925 if (g.CanBeImmediate(right)) {
926 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
927 g.UseImmediate(right));
928 } else {
929 if (g.CanBeBetterLeftOperand(right)) {
930 std::swap(left, right);
931 }
932 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
933 g.Use(right));
934 }
935 }
936
937 void VisitMulHigh(InstructionSelector* selector, Node* node,
938 ArchOpcode opcode) {
939 X64OperandGenerator g(selector);
940 Node* left = node->InputAt(0);
941 Node* right = node->InputAt(1);
942 if (selector->IsLive(left) && !selector->IsLive(right)) {
943 std::swap(left, right);
944 }
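  // The one-operand mul/imul forms read rax and put the high half of the
  // product in rdx, hence the fixed register constraints below.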
945 InstructionOperand temps[] = {g.TempRegister(rax)};
946 // TODO(turbofan): We use UseUniqueRegister here to improve register
947 // allocation.
948 selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
949 g.UseUniqueRegister(right), arraysize(temps), temps);
950 }
951
952
953 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
954 X64OperandGenerator g(selector);
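  // idiv/div take the dividend in rdx:rax and leave the quotient in rax;
  // rdx is clobbered, so it is reserved as a temp.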
955 InstructionOperand temps[] = {g.TempRegister(rdx)};
956 selector->Emit(
957 opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
958 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
959 }
960
961
962 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
963 X64OperandGenerator g(selector);
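  // For the remainder the result is in rdx, while rax is clobbered by the
  // division and therefore reserved as a temp.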
964 InstructionOperand temps[] = {g.TempRegister(rax)};
965 selector->Emit(
966 opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
967 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
968 }
969
970 } // namespace
971
972
973 void InstructionSelector::VisitInt32Mul(Node* node) {
974 Int32ScaleMatcher m(node, true);
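  // Multiplications by 2, 4, 8 and by 3, 5, 9 can be expressed as a single
  // leal using the SIB scale, optionally with the operand as base.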
975 if (m.matches()) {
976 Node* index = node->InputAt(0);
977 Node* base = m.power_of_two_plus_one() ? index : nullptr;
978 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
979 kPositiveDisplacement);
980 return;
981 }
982 VisitMul(this, node, kX64Imul32);
983 }
984
985 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
986 // TODO(mvstanton): Use Int32ScaleMatcher somehow.
987 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
988 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
989 return VisitBinop(this, node, kX64Imul32, &cont);
990 }
991 FlagsContinuation cont;
992 VisitBinop(this, node, kX64Imul32, &cont);
993 }
994
995 void InstructionSelector::VisitInt64Mul(Node* node) {
996 VisitMul(this, node, kX64Imul);
997 }
998
999 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1000 VisitMulHigh(this, node, kX64ImulHigh32);
1001 }
1002
1003
1004 void InstructionSelector::VisitInt32Div(Node* node) {
1005 VisitDiv(this, node, kX64Idiv32);
1006 }
1007
1008
1009 void InstructionSelector::VisitInt64Div(Node* node) {
1010 VisitDiv(this, node, kX64Idiv);
1011 }
1012
1013
1014 void InstructionSelector::VisitUint32Div(Node* node) {
1015 VisitDiv(this, node, kX64Udiv32);
1016 }
1017
1018
1019 void InstructionSelector::VisitUint64Div(Node* node) {
1020 VisitDiv(this, node, kX64Udiv);
1021 }
1022
1023
1024 void InstructionSelector::VisitInt32Mod(Node* node) {
1025 VisitMod(this, node, kX64Idiv32);
1026 }
1027
1028
1029 void InstructionSelector::VisitInt64Mod(Node* node) {
1030 VisitMod(this, node, kX64Idiv);
1031 }
1032
1033
1034 void InstructionSelector::VisitUint32Mod(Node* node) {
1035 VisitMod(this, node, kX64Udiv32);
1036 }
1037
1038
1039 void InstructionSelector::VisitUint64Mod(Node* node) {
1040 VisitMod(this, node, kX64Udiv);
1041 }
1042
1043
1044 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1045 VisitMulHigh(this, node, kX64UmulHigh32);
1046 }
1047
1048 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1049 X64OperandGenerator g(this);
1050 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1051 InstructionOperand outputs[2];
1052 size_t output_count = 0;
1053 outputs[output_count++] = g.DefineAsRegister(node);
1054
1055 Node* success_output = NodeProperties::FindProjection(node, 1);
1056 if (success_output) {
1057 outputs[output_count++] = g.DefineAsRegister(success_output);
1058 }
1059
1060 Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
1061 }
1062
1063
1064 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1065 X64OperandGenerator g(this);
1066 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1067 InstructionOperand outputs[2];
1068 size_t output_count = 0;
1069 outputs[output_count++] = g.DefineAsRegister(node);
1070
1071 Node* success_output = NodeProperties::FindProjection(node, 1);
1072 if (success_output) {
1073 outputs[output_count++] = g.DefineAsRegister(success_output);
1074 }
1075
1076 Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
1077 }
1078
1079
1080 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1081 X64OperandGenerator g(this);
1082 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1083 InstructionOperand outputs[2];
1084 size_t output_count = 0;
1085 outputs[output_count++] = g.DefineAsRegister(node);
1086
1087 Node* success_output = NodeProperties::FindProjection(node, 1);
1088 if (success_output) {
1089 outputs[output_count++] = g.DefineAsRegister(success_output);
1090 }
1091
1092 Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
1093 }
1094
1095
1096 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1097 X64OperandGenerator g(this);
1098 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1099 InstructionOperand outputs[2];
1100 size_t output_count = 0;
1101 outputs[output_count++] = g.DefineAsRegister(node);
1102
1103 Node* success_output = NodeProperties::FindProjection(node, 1);
1104 if (success_output) {
1105 outputs[output_count++] = g.DefineAsRegister(success_output);
1106 }
1107
1108 Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
1109 }
1110
1111
1112 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1113 X64OperandGenerator g(this);
1114 Node* const value = node->InputAt(0);
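  // If the input is a load that this node fully covers, fold it into a single
  // sign- or zero-extending load instead of loading and then extending.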
1115 if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
1116 LoadRepresentation load_rep = LoadRepresentationOf(value->op());
1117 MachineRepresentation rep = load_rep.representation();
1118 InstructionCode opcode = kArchNop;
1119 switch (rep) {
1120 case MachineRepresentation::kBit: // Fall through.
1121 case MachineRepresentation::kWord8:
1122 opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
1123 break;
1124 case MachineRepresentation::kWord16:
1125 opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
1126 break;
1127 case MachineRepresentation::kWord32:
1128 opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
1129 break;
1130 default:
1131 UNREACHABLE();
1132 return;
1133 }
1134 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1135 size_t input_count = 0;
1136 InstructionOperand inputs[3];
1137 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1138 node->InputAt(0), inputs, &input_count);
1139 opcode |= AddressingModeField::encode(mode);
1140 Emit(opcode, 1, outputs, input_count, inputs);
1141 } else {
1142 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1143 }
1144 }
1145
1146 namespace {
1147
1148 bool ZeroExtendsWord32ToWord64(Node* node) {
1149 switch (node->opcode()) {
1150 case IrOpcode::kWord32And:
1151 case IrOpcode::kWord32Or:
1152 case IrOpcode::kWord32Xor:
1153 case IrOpcode::kWord32Shl:
1154 case IrOpcode::kWord32Shr:
1155 case IrOpcode::kWord32Sar:
1156 case IrOpcode::kWord32Ror:
1157 case IrOpcode::kWord32Equal:
1158 case IrOpcode::kInt32Add:
1159 case IrOpcode::kInt32Sub:
1160 case IrOpcode::kInt32Mul:
1161 case IrOpcode::kInt32MulHigh:
1162 case IrOpcode::kInt32Div:
1163 case IrOpcode::kInt32LessThan:
1164 case IrOpcode::kInt32LessThanOrEqual:
1165 case IrOpcode::kInt32Mod:
1166 case IrOpcode::kUint32Div:
1167 case IrOpcode::kUint32LessThan:
1168 case IrOpcode::kUint32LessThanOrEqual:
1169 case IrOpcode::kUint32Mod:
1170 case IrOpcode::kUint32MulHigh:
1171 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1172 // zero-extension is a no-op.
1173 return true;
1174 case IrOpcode::kProjection: {
1175 Node* const value = node->InputAt(0);
1176 switch (value->opcode()) {
1177 case IrOpcode::kInt32AddWithOverflow:
1178 case IrOpcode::kInt32SubWithOverflow:
1179 case IrOpcode::kInt32MulWithOverflow:
1180 return true;
1181 default:
1182 return false;
1183 }
1184 }
1185 case IrOpcode::kLoad:
1186 case IrOpcode::kPoisonedLoad: {
1187 // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
1188 // zero-extend to 64-bit on x64, so the zero-extension is a no-op.
1189 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1190 switch (load_rep.representation()) {
1191 case MachineRepresentation::kWord8:
1192 case MachineRepresentation::kWord16:
1193 case MachineRepresentation::kWord32:
1194 return true;
1195 default:
1196 return false;
1197 }
1198 }
1199 default:
1200 return false;
1201 }
1202 }
1203
1204 } // namespace
1205
1206 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1207 X64OperandGenerator g(this);
1208 Node* value = node->InputAt(0);
1209 if (ZeroExtendsWord32ToWord64(value)) {
1210 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1211 // zero-extension is a no-op.
1212 return EmitIdentity(node);
1213 }
1214 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1215 }
1216
1217
1218 namespace {
1219
1220 void VisitRO(InstructionSelector* selector, Node* node,
1221 InstructionCode opcode) {
1222 X64OperandGenerator g(selector);
1223 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1224 }
1225
1226
1227 void VisitRR(InstructionSelector* selector, Node* node,
1228 InstructionCode opcode) {
1229 X64OperandGenerator g(selector);
1230 selector->Emit(opcode, g.DefineAsRegister(node),
1231 g.UseRegister(node->InputAt(0)));
1232 }
1233
1234 void VisitRRO(InstructionSelector* selector, Node* node,
1235 InstructionCode opcode) {
1236 X64OperandGenerator g(selector);
1237 selector->Emit(opcode, g.DefineSameAsFirst(node),
1238 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
1239 }
1240
1241 void VisitFloatBinop(InstructionSelector* selector, Node* node,
1242 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1243 X64OperandGenerator g(selector);
1244 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
1245 InstructionOperand operand1 = g.Use(node->InputAt(1));
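  // AVX encodings are non-destructive three-operand forms, so the result can
  // be placed in any register; the SSE forms overwrite their first operand,
  // hence DefineSameAsFirst.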
1246 if (selector->IsSupported(AVX)) {
1247 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
1248 } else {
1249 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
1250 }
1251 }
1252
1253
1254 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
1255 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1256 X64OperandGenerator g(selector);
1257 if (selector->IsSupported(AVX)) {
1258 selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
1259 } else {
1260 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
1261 }
1262 }
1263
1264 } // namespace
1265
1266 #define RO_OP_LIST(V) \
1267 V(Word64Clz, kX64Lzcnt) \
1268 V(Word32Clz, kX64Lzcnt32) \
1269 V(Word64Ctz, kX64Tzcnt) \
1270 V(Word32Ctz, kX64Tzcnt32) \
1271 V(Word64Popcnt, kX64Popcnt) \
1272 V(Word32Popcnt, kX64Popcnt32) \
1273 V(Float64Sqrt, kSSEFloat64Sqrt) \
1274 V(Float32Sqrt, kSSEFloat32Sqrt) \
1275 V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
1276 V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
1277 V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
1278 V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
1279 V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
1280 V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
1281 V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
1282 V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
1283 V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
1284 V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
1285 V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
1286 V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
1287 V(RoundInt64ToFloat32, kSSEInt64ToFloat32) \
1288 V(RoundUint64ToFloat32, kSSEUint64ToFloat32) \
1289 V(RoundInt64ToFloat64, kSSEInt64ToFloat64) \
1290 V(RoundUint64ToFloat64, kSSEUint64ToFloat64) \
1291 V(RoundUint32ToFloat32, kSSEUint32ToFloat32) \
1292 V(BitcastFloat32ToInt32, kX64BitcastFI) \
1293 V(BitcastFloat64ToInt64, kX64BitcastDL) \
1294 V(BitcastInt32ToFloat32, kX64BitcastIF) \
1295 V(BitcastInt64ToFloat64, kX64BitcastLD) \
1296 V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
1297 V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
1298 V(SignExtendWord8ToInt32, kX64Movsxbl) \
1299 V(SignExtendWord16ToInt32, kX64Movsxwl) \
1300 V(SignExtendWord8ToInt64, kX64Movsxbq) \
1301 V(SignExtendWord16ToInt64, kX64Movsxwq) \
1302 V(SignExtendWord32ToInt64, kX64Movsxlq)
1303
1304 #define RR_OP_LIST(V) \
1305 V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
1306 V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
1307 V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
1308 V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
1309 V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
1310 V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
1311 V(Float32RoundTiesEven, \
1312 kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
1313 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
1314
1315 #define RO_VISITOR(Name, opcode) \
1316 void InstructionSelector::Visit##Name(Node* node) { \
1317 VisitRO(this, node, opcode); \
1318 }
1319 RO_OP_LIST(RO_VISITOR)
1320 #undef RO_VISITOR
1321 #undef RO_OP_LIST
1322
1323 #define RR_VISITOR(Name, opcode) \
1324 void InstructionSelector::Visit##Name(Node* node) { \
1325 VisitRR(this, node, opcode); \
1326 }
1327 RR_OP_LIST(RR_VISITOR)
1328 #undef RR_VISITOR
1329 #undef RR_OP_LIST
1330
1331 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
1332 VisitRR(this, node, kArchTruncateDoubleToI);
1333 }
1334
1335 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1336 X64OperandGenerator g(this);
1337 Node* value = node->InputAt(0);
1338 if (CanCover(node, value)) {
1339 switch (value->opcode()) {
1340 case IrOpcode::kWord64Sar:
1341 case IrOpcode::kWord64Shr: {
1342 Int64BinopMatcher m(value);
1343 if (m.right().Is(32)) {
1344 if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
1345 return EmitIdentity(node);
1346 }
1347 Emit(kX64Shr, g.DefineSameAsFirst(node),
1348 g.UseRegister(m.left().node()), g.TempImmediate(32));
1349 return;
1350 }
1351 break;
1352 }
1353 default:
1354 break;
1355 }
1356 }
1357 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1358 }
1359
1360 void InstructionSelector::VisitFloat32Add(Node* node) {
1361 VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
1362 }
1363
1364
1365 void InstructionSelector::VisitFloat32Sub(Node* node) {
1366 VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
1367 }
1368
1369 void InstructionSelector::VisitFloat32Mul(Node* node) {
1370 VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
1371 }
1372
1373
1374 void InstructionSelector::VisitFloat32Div(Node* node) {
1375 VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
1376 }
1377
1378
1379 void InstructionSelector::VisitFloat32Abs(Node* node) {
1380 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
1381 }
1382
1383
1384 void InstructionSelector::VisitFloat32Max(Node* node) {
1385 VisitRRO(this, node, kSSEFloat32Max);
1386 }
1387
1388 void InstructionSelector::VisitFloat32Min(Node* node) {
1389 VisitRRO(this, node, kSSEFloat32Min);
1390 }
1391
1392 void InstructionSelector::VisitFloat64Add(Node* node) {
1393 VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
1394 }
1395
1396
1397 void InstructionSelector::VisitFloat64Sub(Node* node) {
1398 VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
1399 }
1400
1401 void InstructionSelector::VisitFloat64Mul(Node* node) {
1402 VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
1403 }
1404
1405
1406 void InstructionSelector::VisitFloat64Div(Node* node) {
1407 VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
1408 }
1409
1410
1411 void InstructionSelector::VisitFloat64Mod(Node* node) {
1412 X64OperandGenerator g(this);
1413 InstructionOperand temps[] = {g.TempRegister(rax)};
1414 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
1415 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
1416 temps);
1417 }
1418
1419
1420 void InstructionSelector::VisitFloat64Max(Node* node) {
1421 VisitRRO(this, node, kSSEFloat64Max);
1422 }
1423
1424
1425 void InstructionSelector::VisitFloat64Min(Node* node) {
1426 VisitRRO(this, node, kSSEFloat64Min);
1427 }
1428
1429
1430 void InstructionSelector::VisitFloat64Abs(Node* node) {
1431 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
1432 }
1433
1434
1435 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1436 UNREACHABLE();
1437 }
1438
1439
1440 void InstructionSelector::VisitFloat32Neg(Node* node) {
1441 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
1442 }
1443
1444 void InstructionSelector::VisitFloat64Neg(Node* node) {
1445 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
1446 }
1447
1448 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1449 InstructionCode opcode) {
1450 X64OperandGenerator g(this);
1451 Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
1452 g.UseFixed(node->InputAt(1), xmm1))
1453 ->MarkAsCall();
1454 }
1455
1456 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1457 InstructionCode opcode) {
1458 X64OperandGenerator g(this);
1459 Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
1460 ->MarkAsCall();
1461 }
1462
1463 void InstructionSelector::EmitPrepareArguments(
1464 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1465 Node* node) {
1466 X64OperandGenerator g(this);
1467
1468 // Prepare for C function call.
1469 if (call_descriptor->IsCFunctionCall()) {
1470 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1471 call_descriptor->ParameterCount())),
1472 0, nullptr, 0, nullptr);
1473
1474 // Poke any stack arguments.
1475 for (size_t n = 0; n < arguments->size(); ++n) {
1476 PushParameter input = (*arguments)[n];
1477 if (input.node) {
1478 int slot = static_cast<int>(n);
1479 InstructionOperand value = g.CanBeImmediate(input.node)
1480 ? g.UseImmediate(input.node)
1481 : g.UseRegister(input.node);
1482 Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
1483 }
1484 }
1485 } else {
1486 // Push any stack arguments.
1487 int effect_level = GetEffectLevel(node);
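    // Stack arguments are pushed from last to first, so walk the parameter
    // list in reverse order.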
1488 for (PushParameter input : base::Reversed(*arguments)) {
1489 // Skip any alignment holes in pushed nodes. We may have one in case of a
1490 // Simd128 stack argument.
1491 if (input.node == nullptr) continue;
1492 if (g.CanBeImmediate(input.node)) {
1493 Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
1494 } else if (IsSupported(ATOM) ||
1495 sequence()->IsFP(GetVirtualRegister(input.node))) {
1496 // TODO(titzer): X64Push cannot handle stack->stack double moves
1497 // because there is no way to encode fixed double slots.
1498 Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
1499 } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
1500 effect_level)) {
1501 InstructionOperand outputs[1];
1502 InstructionOperand inputs[4];
1503 size_t input_count = 0;
1504 InstructionCode opcode = kX64Push;
1505 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1506 input.node, inputs, &input_count);
1507 opcode |= AddressingModeField::encode(mode);
1508 Emit(opcode, 0, outputs, input_count, inputs);
1509 } else {
1510 Emit(kX64Push, g.NoOutput(), g.Use(input.node));
1511 }
1512 }
1513 }
1514 }
1515
1516 void InstructionSelector::EmitPrepareResults(
1517 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1518 Node* node) {
1519 X64OperandGenerator g(this);
1520
1521 int reverse_slot = 0;
1522 for (PushParameter output : *results) {
1523 if (!output.location.IsCallerFrameSlot()) continue;
1524 reverse_slot += output.location.GetSizeInPointers();
1525 // Skip any alignment holes in nodes.
1526 if (output.node == nullptr) continue;
1527 DCHECK(!call_descriptor->IsCFunctionCall());
1528 if (output.location.GetType() == MachineType::Float32()) {
1529 MarkAsFloat32(output.node);
1530 } else if (output.location.GetType() == MachineType::Float64()) {
1531 MarkAsFloat64(output.node);
1532 }
1533 InstructionOperand result = g.DefineAsRegister(output.node);
1534 InstructionOperand slot = g.UseImmediate(reverse_slot);
1535 Emit(kX64Peek, 1, &result, 1, &slot);
1536 }
1537 }
1538
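// Tail-call targets can always be encoded as immediates on x64.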
1539 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
1540
1541 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1542
1543 namespace {
1544
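// Shared routine for comparisons whose left operand is a load that can be
// folded into a memory operand.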
1545 void VisitCompareWithMemoryOperand(InstructionSelector* selector,
1546 InstructionCode opcode, Node* left,
1547 InstructionOperand right,
1548 FlagsContinuation* cont) {
1549 DCHECK_EQ(IrOpcode::kLoad, left->opcode());
1550 X64OperandGenerator g(selector);
1551 size_t input_count = 0;
1552 InstructionOperand inputs[4];
1553 AddressingMode addressing_mode =
1554 g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
1555 opcode |= AddressingModeField::encode(addressing_mode);
1556 inputs[input_count++] = right;
1557
1558 selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
1559 }
1560
1561 // Shared routine for multiple compare operations.
1562 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1563 InstructionOperand left, InstructionOperand right,
1564 FlagsContinuation* cont) {
1565 selector->EmitWithContinuation(opcode, left, right, cont);
1566 }
1567
1568
1569 // Shared routine for multiple compare operations.
1570 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1571 Node* left, Node* right, FlagsContinuation* cont,
1572 bool commutative) {
1573 X64OperandGenerator g(selector);
1574 if (commutative && g.CanBeBetterLeftOperand(right)) {
1575 std::swap(left, right);
1576 }
1577 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
1578 }
1579
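// Returns the narrowest machine type that can be used for {node} when it is
// compared against {hint_node}: a constant that fits the representation
// loaded by {hint_node} adopts that type; otherwise {node}'s own load type
// (or None) is used.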
1580 MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
1581 if (hint_node->opcode() == IrOpcode::kLoad) {
1582 MachineType hint = LoadRepresentationOf(hint_node->op());
1583 if (node->opcode() == IrOpcode::kInt32Constant ||
1584 node->opcode() == IrOpcode::kInt64Constant) {
1585 int64_t constant = node->opcode() == IrOpcode::kInt32Constant
1586 ? OpParameter<int32_t>(node->op())
1587 : OpParameter<int64_t>(node->op());
1588 if (hint == MachineType::Int8()) {
1589 if (constant >= std::numeric_limits<int8_t>::min() &&
1590 constant <= std::numeric_limits<int8_t>::max()) {
1591 return hint;
1592 }
1593 } else if (hint == MachineType::Uint8()) {
1594 if (constant >= std::numeric_limits<uint8_t>::min() &&
1595 constant <= std::numeric_limits<uint8_t>::max()) {
1596 return hint;
1597 }
1598 } else if (hint == MachineType::Int16()) {
1599 if (constant >= std::numeric_limits<int16_t>::min() &&
1600 constant <= std::numeric_limits<int16_t>::max()) {
1601 return hint;
1602 }
1603 } else if (hint == MachineType::Uint16()) {
1604 if (constant >= std::numeric_limits<uint16_t>::min() &&
1605 constant <= std::numeric_limits<uint16_t>::max()) {
1606 return hint;
1607 }
1608 } else if (hint == MachineType::Int32()) {
1609 return hint;
1610 } else if (hint == MachineType::Uint32()) {
1611 if (constant >= 0) return hint;
1612 }
1613 }
1614 }
1615 return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
1616 : MachineType::None();
1617 }
1618
1619 // Tries to match the size of the given opcode to that of the operands, if
1620 // possible.
1621 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
1622 Node* right, FlagsContinuation* cont) {
1623   // TODO(epertoso): we can probably get some size information out of phi nodes.
1624   // If the load representations don't match, both operands will be
1625   // zero/sign-extended to 32 bits.
1626 MachineType left_type = MachineTypeForNarrow(left, right);
1627 MachineType right_type = MachineTypeForNarrow(right, left);
1628 if (left_type == right_type) {
1629 switch (left_type.representation()) {
1630 case MachineRepresentation::kBit:
1631 case MachineRepresentation::kWord8: {
1632 if (opcode == kX64Test32) return kX64Test8;
1633 if (opcode == kX64Cmp32) {
1634 if (left_type.semantic() == MachineSemantic::kUint32) {
1635 cont->OverwriteUnsignedIfSigned();
1636 } else {
1637 CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1638 }
1639 return kX64Cmp8;
1640 }
1641 break;
1642 }
1643 case MachineRepresentation::kWord16:
1644 if (opcode == kX64Test32) return kX64Test16;
1645 if (opcode == kX64Cmp32) {
1646 if (left_type.semantic() == MachineSemantic::kUint32) {
1647 cont->OverwriteUnsignedIfSigned();
1648 } else {
1649 CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1650 }
1651 return kX64Cmp16;
1652 }
1653 break;
1654 default:
1655 break;
1656 }
1657 }
1658 return opcode;
1659 }
1660
1661 // Shared routine for multiple word compare operations.
1662 void VisitWordCompare(InstructionSelector* selector, Node* node,
1663 InstructionCode opcode, FlagsContinuation* cont) {
1664 X64OperandGenerator g(selector);
1665 Node* left = node->InputAt(0);
1666 Node* right = node->InputAt(1);
1667
1668 opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
1669
1670 // If one of the two inputs is an immediate, make sure it's on the right, or
1671 // if one of the two inputs is a memory operand, make sure it's on the left.
1672 int effect_level = selector->GetEffectLevel(node);
1673 if (cont->IsBranch()) {
1674 effect_level = selector->GetEffectLevel(
1675 cont->true_block()->PredecessorAt(0)->control_input());
1676 }
1677
1678 if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
1679 (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
1680 !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
1681 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1682 std::swap(left, right);
1683 }
1684
1685 // Match immediates on right side of comparison.
1686 if (g.CanBeImmediate(right)) {
1687 if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
1688 return VisitCompareWithMemoryOperand(selector, opcode, left,
1689 g.UseImmediate(right), cont);
1690 }
1691 return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
1692 cont);
1693 }
1694
1695 // Match memory operands on left side of comparison.
1696 if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
1697 return VisitCompareWithMemoryOperand(selector, opcode, left,
1698 g.UseRegister(right), cont);
1699 }
1700
1701 return VisitCompare(selector, opcode, left, right, cont,
1702 node->op()->HasProperty(Operator::kCommutative));
1703 }
1704
1705 // Shared routine for 64-bit word comparison operations.
1706 void VisitWord64Compare(InstructionSelector* selector, Node* node,
1707 FlagsContinuation* cont) {
1708 X64OperandGenerator g(selector);
1709 if (selector->CanUseRootsRegister()) {
1710 Heap* const heap = selector->isolate()->heap();
1711 Heap::RootListIndex root_index;
1712 HeapObjectBinopMatcher m(node);
1713 if (m.right().HasValue() &&
1714 heap->IsRootHandle(m.right().Value(), &root_index)) {
1715 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1716 InstructionCode opcode =
1717 kX64Cmp | AddressingModeField::encode(kMode_Root);
1718 return VisitCompare(
1719 selector, opcode,
1720 g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
1721 g.UseRegister(m.left().node()), cont);
1722 } else if (m.left().HasValue() &&
1723 heap->IsRootHandle(m.left().Value(), &root_index)) {
1724 InstructionCode opcode =
1725 kX64Cmp | AddressingModeField::encode(kMode_Root);
1726 return VisitCompare(
1727 selector, opcode,
1728 g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
1729 g.UseRegister(m.right().node()), cont);
1730 }
1731 }
1732 StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
1733 selector->isolate(), node);
1734 if (m.Matched()) {
1735 // Compare(Load(js_stack_limit), LoadStackPointer)
1736 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1737 InstructionCode opcode = cont->Encode(kX64StackCheck);
1738 CHECK(cont->IsBranch());
1739 selector->EmitWithContinuation(opcode, cont);
1740 return;
1741 }
1742 WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
1743 node);
1744 if (wasm_m.Matched()) {
1745 // This is a wasm stack check. By structure, we know that we can use the
1746 // stack pointer directly, as wasm code does not modify the stack at points
1747 // where stack checks are performed.
1748 Node* left = node->InputAt(0);
1749 LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
1750 InstructionSequence::DefaultRepresentation(),
1751 RegisterCode::kRegCode_rsp);
1752 return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont);
1753 }
1754 VisitWordCompare(selector, node, kX64Cmp, cont);
1755 }
1756
1757 // Shared routine for comparison with zero.
1758 void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node,
1759 InstructionCode opcode, FlagsContinuation* cont) {
1760 X64OperandGenerator g(selector);
1761 if (cont->IsBranch() &&
1762 (cont->condition() == kNotEqual || cont->condition() == kEqual)) {
1763 switch (node->opcode()) {
1764 #define FLAGS_SET_BINOP_LIST(V) \
1765 V(kInt32Add, VisitBinop, kX64Add32) \
1766 V(kInt32Sub, VisitBinop, kX64Sub32) \
1767 V(kWord32And, VisitBinop, kX64And32) \
1768 V(kWord32Or, VisitBinop, kX64Or32) \
1769 V(kInt64Add, VisitBinop, kX64Add) \
1770 V(kInt64Sub, VisitBinop, kX64Sub) \
1771 V(kWord64And, VisitBinop, kX64And) \
1772 V(kWord64Or, VisitBinop, kX64Or)
1773 #define FLAGS_SET_BINOP(opcode, Visit, archOpcode) \
1774 case IrOpcode::opcode: \
1775 if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \
1776 return Visit(selector, node, archOpcode, cont); \
1777 } \
1778 break;
1779 FLAGS_SET_BINOP_LIST(FLAGS_SET_BINOP)
1780 #undef FLAGS_SET_BINOP_LIST
1781 #undef FLAGS_SET_BINOP
1782
1783 #define TRY_VISIT_WORD32_SHIFT TryVisitWordShift<Int32BinopMatcher, 32>
1784 #define TRY_VISIT_WORD64_SHIFT TryVisitWordShift<Int64BinopMatcher, 64>
1785 // Skip Word64Sar/Word32Sar since there is no instruction reduction in most cases.
1786 #define FLAGS_SET_SHIFT_LIST(V) \
1787 V(kWord32Shl, TRY_VISIT_WORD32_SHIFT, kX64Shl32) \
1788 V(kWord32Shr, TRY_VISIT_WORD32_SHIFT, kX64Shr32) \
1789 V(kWord64Shl, TRY_VISIT_WORD64_SHIFT, kX64Shl) \
1790 V(kWord64Shr, TRY_VISIT_WORD64_SHIFT, kX64Shr)
1791 #define FLAGS_SET_SHIFT(opcode, TryVisit, archOpcode) \
1792 case IrOpcode::opcode: \
1793 if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \
1794 if (TryVisit(selector, node, archOpcode, cont)) return; \
1795 } \
1796 break;
1797 FLAGS_SET_SHIFT_LIST(FLAGS_SET_SHIFT)
1798 #undef TRY_VISIT_WORD32_SHIFT
1799 #undef TRY_VISIT_WORD64_SHIFT
1800 #undef FLAGS_SET_SHIFT_LIST
1801 #undef FLAGS_SET_SHIFT
1802 default:
1803 break;
1804 }
1805 }
1806 VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
1807 }
1808
1809
1810 // Shared routine for multiple float32 compare operations (inputs commuted).
1811 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1812 FlagsContinuation* cont) {
1813 Node* const left = node->InputAt(0);
1814 Node* const right = node->InputAt(1);
1815 InstructionCode const opcode =
1816 selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
1817 VisitCompare(selector, opcode, right, left, cont, false);
1818 }
1819
1820
1821 // Shared routine for multiple float64 compare operations (inputs commuted).
1822 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1823 FlagsContinuation* cont) {
1824 Node* const left = node->InputAt(0);
1825 Node* const right = node->InputAt(1);
1826 InstructionCode const opcode =
1827 selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
1828 VisitCompare(selector, opcode, right, left, cont, false);
1829 }
1830
1831 // Shared routine for Word32/Word64 Atomic Binops
1832 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
1833 ArchOpcode opcode) {
1834 X64OperandGenerator g(selector);
1835 Node* base = node->InputAt(0);
1836 Node* index = node->InputAt(1);
1837 Node* value = node->InputAt(2);
1838 AddressingMode addressing_mode;
1839 InstructionOperand inputs[] = {
1840 g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1841 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1842 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1843 InstructionOperand temps[] = {g.TempRegister()};
1844 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1845 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1846 arraysize(temps), temps);
1847 }
1848
1849 // Shared routine for Word32/Word64 Atomic CompareExchange
1850 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
1851 ArchOpcode opcode) {
1852 X64OperandGenerator g(selector);
1853 Node* base = node->InputAt(0);
1854 Node* index = node->InputAt(1);
1855 Node* old_value = node->InputAt(2);
1856 Node* new_value = node->InputAt(3);
1857 AddressingMode addressing_mode;
1858 InstructionOperand inputs[] = {
1859 g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value),
1860 g.UseUniqueRegister(base),
1861 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1862 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1863 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1864 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
1865 }
1866
1867 // Shared routine for Word32/Word64 Atomic Exchange
1868 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
1869 ArchOpcode opcode) {
1870 X64OperandGenerator g(selector);
1871 Node* base = node->InputAt(0);
1872 Node* index = node->InputAt(1);
1873 Node* value = node->InputAt(2);
1874 AddressingMode addressing_mode;
1875 InstructionOperand inputs[] = {
1876 g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1877 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1878 InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
1879 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1880 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
1881 }
1882
1883 } // namespace
1884
1885 // Shared routine for word comparison against zero.
1886 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
1887 FlagsContinuation* cont) {
1888 // Try to combine with comparisons against 0 by simply inverting the branch.
1889 while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
1890 Int32BinopMatcher m(value);
1891 if (!m.right().Is(0)) break;
1892
1893 user = value;
1894 value = m.left().node();
1895 cont->Negate();
1896 }
1897
1898 if (CanCover(user, value)) {
1899 switch (value->opcode()) {
1900 case IrOpcode::kWord32Equal:
1901 cont->OverwriteAndNegateIfEqual(kEqual);
1902 return VisitWordCompare(this, value, kX64Cmp32, cont);
1903 case IrOpcode::kInt32LessThan:
1904 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1905 return VisitWordCompare(this, value, kX64Cmp32, cont);
1906 case IrOpcode::kInt32LessThanOrEqual:
1907 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1908 return VisitWordCompare(this, value, kX64Cmp32, cont);
1909 case IrOpcode::kUint32LessThan:
1910 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1911 return VisitWordCompare(this, value, kX64Cmp32, cont);
1912 case IrOpcode::kUint32LessThanOrEqual:
1913 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1914 return VisitWordCompare(this, value, kX64Cmp32, cont);
1915 case IrOpcode::kWord64Equal: {
1916 cont->OverwriteAndNegateIfEqual(kEqual);
1917 Int64BinopMatcher m(value);
1918 if (m.right().Is(0)) {
1919 // Try to combine the branch with a comparison.
1920 Node* const user = m.node();
1921 Node* const value = m.left().node();
1922 if (CanCover(user, value)) {
1923 switch (value->opcode()) {
1924 case IrOpcode::kInt64Sub:
1925 return VisitWord64Compare(this, value, cont);
1926 case IrOpcode::kWord64And:
1927 return VisitWordCompare(this, value, kX64Test, cont);
1928 default:
1929 break;
1930 }
1931 }
1932 return VisitCompareZero(this, user, value, kX64Cmp, cont);
1933 }
1934 return VisitWord64Compare(this, value, cont);
1935 }
1936 case IrOpcode::kInt64LessThan:
1937 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1938 return VisitWord64Compare(this, value, cont);
1939 case IrOpcode::kInt64LessThanOrEqual:
1940 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1941 return VisitWord64Compare(this, value, cont);
1942 case IrOpcode::kUint64LessThan:
1943 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1944 return VisitWord64Compare(this, value, cont);
1945 case IrOpcode::kUint64LessThanOrEqual:
1946 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1947 return VisitWord64Compare(this, value, cont);
1948 case IrOpcode::kFloat32Equal:
1949 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
1950 return VisitFloat32Compare(this, value, cont);
1951 case IrOpcode::kFloat32LessThan:
1952 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
1953 return VisitFloat32Compare(this, value, cont);
1954 case IrOpcode::kFloat32LessThanOrEqual:
1955 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
1956 return VisitFloat32Compare(this, value, cont);
1957 case IrOpcode::kFloat64Equal:
1958 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
1959 return VisitFloat64Compare(this, value, cont);
1960 case IrOpcode::kFloat64LessThan: {
1961 Float64BinopMatcher m(value);
1962 if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
1963 // This matches the pattern
1964 //
1965 // Float64LessThan(#0.0, Float64Abs(x))
1966 //
1967 // which TurboFan generates for NumberToBoolean in the general case,
1968 // and which evaluates to false if x is 0, -0 or NaN. We can compile
1969 // this to a simple (v)ucomisd using not_equal flags condition, which
1970 // avoids the costly Float64Abs.
1971 cont->OverwriteAndNegateIfEqual(kNotEqual);
1972 InstructionCode const opcode =
1973 IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
1974 return VisitCompare(this, opcode, m.left().node(),
1975 m.right().InputAt(0), cont, false);
1976 }
1977 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
1978 return VisitFloat64Compare(this, value, cont);
1979 }
1980 case IrOpcode::kFloat64LessThanOrEqual:
1981 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
1982 return VisitFloat64Compare(this, value, cont);
1983 case IrOpcode::kProjection:
1984 // Check if this is the overflow output projection of an
1985 // <Operation>WithOverflow node.
1986 if (ProjectionIndexOf(value->op()) == 1u) {
1987           // We cannot combine the <Operation>WithOverflow with this branch
1988           // unless the 0th projection (the use of the actual value of the
1989           // <Operation>) is either nullptr, which means there's no use of
1990           // the actual value, or was already defined, which means it is
1991           // scheduled *AFTER* this branch.
1992 Node* const node = value->InputAt(0);
1993 Node* const result = NodeProperties::FindProjection(node, 0);
1994 if (result == nullptr || IsDefined(result)) {
1995 switch (node->opcode()) {
1996 case IrOpcode::kInt32AddWithOverflow:
1997 cont->OverwriteAndNegateIfEqual(kOverflow);
1998 return VisitBinop(this, node, kX64Add32, cont);
1999 case IrOpcode::kInt32SubWithOverflow:
2000 cont->OverwriteAndNegateIfEqual(kOverflow);
2001 return VisitBinop(this, node, kX64Sub32, cont);
2002 case IrOpcode::kInt32MulWithOverflow:
2003 cont->OverwriteAndNegateIfEqual(kOverflow);
2004 return VisitBinop(this, node, kX64Imul32, cont);
2005 case IrOpcode::kInt64AddWithOverflow:
2006 cont->OverwriteAndNegateIfEqual(kOverflow);
2007 return VisitBinop(this, node, kX64Add, cont);
2008 case IrOpcode::kInt64SubWithOverflow:
2009 cont->OverwriteAndNegateIfEqual(kOverflow);
2010 return VisitBinop(this, node, kX64Sub, cont);
2011 default:
2012 break;
2013 }
2014 }
2015 }
2016 break;
2017 case IrOpcode::kInt32Sub:
2018 return VisitWordCompare(this, value, kX64Cmp32, cont);
2019 case IrOpcode::kWord32And:
2020 return VisitWordCompare(this, value, kX64Test32, cont);
2021 default:
2022 break;
2023 }
2024 }
2025
2026 // Branch could not be combined with a compare, emit compare against 0.
2027 VisitCompareZero(this, user, value, kX64Cmp32, cont);
2028 }
2029
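// Lowers Switch either to a jump table or to a balanced tree of conditional
// jumps, depending on the case count and the value range.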
2030 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
2031 X64OperandGenerator g(this);
2032 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
2033
2034   // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2035 if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
2036 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2037 size_t table_space_cost = 4 + sw.value_range();
2038 size_t table_time_cost = 3;
2039 size_t lookup_space_cost = 3 + 2 * sw.case_count();
2040 size_t lookup_time_cost = sw.case_count();
2041 if (sw.case_count() > 4 &&
2042 table_space_cost + 3 * table_time_cost <=
2043 lookup_space_cost + 3 * lookup_time_cost &&
2044 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2045 sw.value_range() <= kMaxTableSwitchValueRange) {
2046 InstructionOperand index_operand = g.TempRegister();
2047 if (sw.min_value()) {
2048         // The leal automatically zero extends, so the result is a valid
2049         // 64-bit index.
2050 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
2051 value_operand, g.TempImmediate(-sw.min_value()));
2052 } else {
2053         // Zero extend, because we use it as a 64-bit index into the jump table.
2054 Emit(kX64Movl, index_operand, value_operand);
2055 }
2056 // Generate a table lookup.
2057 return EmitTableSwitch(sw, index_operand);
2058 }
2059 }
2060
2061 // Generate a tree of conditional jumps.
2062 return EmitBinarySearchSwitch(sw, value_operand);
2063 }
2064
2065
2066 void InstructionSelector::VisitWord32Equal(Node* const node) {
2067 Node* user = node;
2068 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2069 Int32BinopMatcher m(user);
2070 if (m.right().Is(0)) {
2071 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2072 }
2073 VisitWordCompare(this, node, kX64Cmp32, &cont);
2074 }
2075
2076
2077 void InstructionSelector::VisitInt32LessThan(Node* node) {
2078 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2079 VisitWordCompare(this, node, kX64Cmp32, &cont);
2080 }
2081
2082
2083 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2084 FlagsContinuation cont =
2085 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2086 VisitWordCompare(this, node, kX64Cmp32, &cont);
2087 }
2088
2089
2090 void InstructionSelector::VisitUint32LessThan(Node* node) {
2091 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2092 VisitWordCompare(this, node, kX64Cmp32, &cont);
2093 }
2094
2095
2096 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2097 FlagsContinuation cont =
2098 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2099 VisitWordCompare(this, node, kX64Cmp32, &cont);
2100 }
2101
2102
2103 void InstructionSelector::VisitWord64Equal(Node* const node) {
2104 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2105 Int64BinopMatcher m(node);
2106 if (m.right().Is(0)) {
2107 // Try to combine the equality check with a comparison.
2108 Node* const user = m.node();
2109 Node* const value = m.left().node();
2110 if (CanCover(user, value)) {
2111 switch (value->opcode()) {
2112 case IrOpcode::kInt64Sub:
2113 return VisitWord64Compare(this, value, &cont);
2114 case IrOpcode::kWord64And:
2115 return VisitWordCompare(this, value, kX64Test, &cont);
2116 default:
2117 break;
2118 }
2119 }
2120 }
2121 VisitWord64Compare(this, node, &cont);
2122 }
2123
2124
2125 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2126 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2127 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2128 return VisitBinop(this, node, kX64Add32, &cont);
2129 }
2130 FlagsContinuation cont;
2131 VisitBinop(this, node, kX64Add32, &cont);
2132 }
2133
2134
2135 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2136 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2137 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2138 return VisitBinop(this, node, kX64Sub32, &cont);
2139 }
2140 FlagsContinuation cont;
2141 VisitBinop(this, node, kX64Sub32, &cont);
2142 }
2143
2144
2145 void InstructionSelector::VisitInt64LessThan(Node* node) {
2146 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2147 VisitWord64Compare(this, node, &cont);
2148 }
2149
2150
2151 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2152 FlagsContinuation cont =
2153 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2154 VisitWord64Compare(this, node, &cont);
2155 }
2156
2157
2158 void InstructionSelector::VisitUint64LessThan(Node* node) {
2159 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2160 VisitWord64Compare(this, node, &cont);
2161 }
2162
2163
2164 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2165 FlagsContinuation cont =
2166 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2167 VisitWord64Compare(this, node, &cont);
2168 }
2169
2170
2171 void InstructionSelector::VisitFloat32Equal(Node* node) {
2172 FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
2173 VisitFloat32Compare(this, node, &cont);
2174 }
2175
2176
2177 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2178 FlagsContinuation cont =
2179 FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
2180 VisitFloat32Compare(this, node, &cont);
2181 }
2182
2183
2184 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2185 FlagsContinuation cont =
2186 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
2187 VisitFloat32Compare(this, node, &cont);
2188 }
2189
2190
2191 void InstructionSelector::VisitFloat64Equal(Node* node) {
2192 FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
2193 VisitFloat64Compare(this, node, &cont);
2194 }
2195
2196 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2197 Float64BinopMatcher m(node);
2198 if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
2199 // This matches the pattern
2200 //
2201 // Float64LessThan(#0.0, Float64Abs(x))
2202 //
2203 // which TurboFan generates for NumberToBoolean in the general case,
2204 // and which evaluates to false if x is 0, -0 or NaN. We can compile
2205 // this to a simple (v)ucomisd using not_equal flags condition, which
2206 // avoids the costly Float64Abs.
2207 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
2208 InstructionCode const opcode =
2209 IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
2210 return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
2211 &cont, false);
2212 }
2213 FlagsContinuation cont =
2214 FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
2215 VisitFloat64Compare(this, node, &cont);
2216 }
2217
2218 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2219 FlagsContinuation cont =
2220 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
2221 VisitFloat64Compare(this, node, &cont);
2222 }
2223
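// If the high word of the left operand is a known zero constant, the cheaper
// load-low-word instruction can be used instead of an insert.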
2224 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2225 X64OperandGenerator g(this);
2226 Node* left = node->InputAt(0);
2227 Node* right = node->InputAt(1);
2228 Float64Matcher mleft(left);
2229 if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
2230 Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
2231 return;
2232 }
2233 Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
2234 g.UseRegister(left), g.Use(right));
2235 }
2236
2237
2238 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2239 X64OperandGenerator g(this);
2240 Node* left = node->InputAt(0);
2241 Node* right = node->InputAt(1);
2242 Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
2243 g.UseRegister(left), g.Use(right));
2244 }
2245
2246 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2247 X64OperandGenerator g(this);
2248 Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
2249 g.UseRegister(node->InputAt(0)));
2250 }
2251
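// On x64, atomic loads are lowered to plain loads; the required ordering is
// provided by lowering the corresponding atomic stores to exchanges below.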
2252 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2253 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2254 DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
2255 load_rep.representation() == MachineRepresentation::kWord16 ||
2256 load_rep.representation() == MachineRepresentation::kWord32);
2257 USE(load_rep);
2258 VisitLoad(node);
2259 }
2260
2261 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2262 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2263 USE(load_rep);
2264 VisitLoad(node);
2265 }
2266
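// Word32 atomic stores are lowered to atomic exchange instructions of the
// matching width.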
2267 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2268 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2269 ArchOpcode opcode = kArchNop;
2270 switch (rep) {
2271 case MachineRepresentation::kWord8:
2272 opcode = kWord32AtomicExchangeInt8;
2273 break;
2274 case MachineRepresentation::kWord16:
2275 opcode = kWord32AtomicExchangeInt16;
2276 break;
2277 case MachineRepresentation::kWord32:
2278 opcode = kWord32AtomicExchangeWord32;
2279 break;
2280 default:
2281 UNREACHABLE();
2282 return;
2283 }
2284 VisitAtomicExchange(this, node, opcode);
2285 }
2286
2287 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2288 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2289 ArchOpcode opcode = kArchNop;
2290 switch (rep) {
2291 case MachineRepresentation::kWord8:
2292 opcode = kX64Word64AtomicExchangeUint8;
2293 break;
2294 case MachineRepresentation::kWord16:
2295 opcode = kX64Word64AtomicExchangeUint16;
2296 break;
2297 case MachineRepresentation::kWord32:
2298 opcode = kX64Word64AtomicExchangeUint32;
2299 break;
2300 case MachineRepresentation::kWord64:
2301 opcode = kX64Word64AtomicExchangeUint64;
2302 break;
2303 default:
2304 UNREACHABLE();
2305 return;
2306 }
2307 VisitAtomicExchange(this, node, opcode);
2308 }
2309
2310 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2311 MachineType type = AtomicOpType(node->op());
2312 ArchOpcode opcode = kArchNop;
2313 if (type == MachineType::Int8()) {
2314 opcode = kWord32AtomicExchangeInt8;
2315 } else if (type == MachineType::Uint8()) {
2316 opcode = kWord32AtomicExchangeUint8;
2317 } else if (type == MachineType::Int16()) {
2318 opcode = kWord32AtomicExchangeInt16;
2319 } else if (type == MachineType::Uint16()) {
2320 opcode = kWord32AtomicExchangeUint16;
2321 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2322 opcode = kWord32AtomicExchangeWord32;
2323 } else {
2324 UNREACHABLE();
2325 return;
2326 }
2327 VisitAtomicExchange(this, node, opcode);
2328 }
2329
2330 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2331 MachineType type = AtomicOpType(node->op());
2332 ArchOpcode opcode = kArchNop;
2333 if (type == MachineType::Uint8()) {
2334 opcode = kX64Word64AtomicExchangeUint8;
2335 } else if (type == MachineType::Uint16()) {
2336 opcode = kX64Word64AtomicExchangeUint16;
2337 } else if (type == MachineType::Uint32()) {
2338 opcode = kX64Word64AtomicExchangeUint32;
2339 } else if (type == MachineType::Uint64()) {
2340 opcode = kX64Word64AtomicExchangeUint64;
2341 } else {
2342 UNREACHABLE();
2343 return;
2344 }
2345 VisitAtomicExchange(this, node, opcode);
2346 }
2347
2348 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2349 MachineType type = AtomicOpType(node->op());
2350 ArchOpcode opcode = kArchNop;
2351 if (type == MachineType::Int8()) {
2352 opcode = kWord32AtomicCompareExchangeInt8;
2353 } else if (type == MachineType::Uint8()) {
2354 opcode = kWord32AtomicCompareExchangeUint8;
2355 } else if (type == MachineType::Int16()) {
2356 opcode = kWord32AtomicCompareExchangeInt16;
2357 } else if (type == MachineType::Uint16()) {
2358 opcode = kWord32AtomicCompareExchangeUint16;
2359 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2360 opcode = kWord32AtomicCompareExchangeWord32;
2361 } else {
2362 UNREACHABLE();
2363 return;
2364 }
2365 VisitAtomicCompareExchange(this, node, opcode);
2366 }
2367
2368 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2369 MachineType type = AtomicOpType(node->op());
2370 ArchOpcode opcode = kArchNop;
2371 if (type == MachineType::Uint8()) {
2372 opcode = kX64Word64AtomicCompareExchangeUint8;
2373 } else if (type == MachineType::Uint16()) {
2374 opcode = kX64Word64AtomicCompareExchangeUint16;
2375 } else if (type == MachineType::Uint32()) {
2376 opcode = kX64Word64AtomicCompareExchangeUint32;
2377 } else if (type == MachineType::Uint64()) {
2378 opcode = kX64Word64AtomicCompareExchangeUint64;
2379 } else {
2380 UNREACHABLE();
2381 return;
2382 }
2383 VisitAtomicCompareExchange(this, node, opcode);
2384 }
2385
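// Dispatches a Word32 atomic binop to the architecture opcode that matches
// the operation's machine type.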
2386 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2387 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2388 ArchOpcode uint16_op, ArchOpcode word32_op) {
2389 MachineType type = AtomicOpType(node->op());
2390 ArchOpcode opcode = kArchNop;
2391 if (type == MachineType::Int8()) {
2392 opcode = int8_op;
2393 } else if (type == MachineType::Uint8()) {
2394 opcode = uint8_op;
2395 } else if (type == MachineType::Int16()) {
2396 opcode = int16_op;
2397 } else if (type == MachineType::Uint16()) {
2398 opcode = uint16_op;
2399 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2400 opcode = word32_op;
2401 } else {
2402 UNREACHABLE();
2403 return;
2404 }
2405 VisitAtomicBinop(this, node, opcode);
2406 }
2407
2408 #define VISIT_ATOMIC_BINOP(op) \
2409 void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
2410 VisitWord32AtomicBinaryOperation( \
2411 node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
2412 kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
2413 kWord32Atomic##op##Word32); \
2414 }
2415 VISIT_ATOMIC_BINOP(Add)
2416 VISIT_ATOMIC_BINOP(Sub)
2417 VISIT_ATOMIC_BINOP(And)
2418 VISIT_ATOMIC_BINOP(Or)
2419 VISIT_ATOMIC_BINOP(Xor)
2420 #undef VISIT_ATOMIC_BINOP
2421
2422 void InstructionSelector::VisitWord64AtomicBinaryOperation(
2423 Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
2424 ArchOpcode word64_op) {
2425 MachineType type = AtomicOpType(node->op());
2426 ArchOpcode opcode = kArchNop;
2427 if (type == MachineType::Uint8()) {
2428 opcode = uint8_op;
2429 } else if (type == MachineType::Uint16()) {
2430 opcode = uint16_op;
2431 } else if (type == MachineType::Uint32()) {
2432 opcode = uint32_op;
2433 } else if (type == MachineType::Uint64()) {
2434 opcode = word64_op;
2435 } else {
2436 UNREACHABLE();
2437 return;
2438 }
2439 VisitAtomicBinop(this, node, opcode);
2440 }
2441
2442 #define VISIT_ATOMIC_BINOP(op) \
2443 void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
2444 VisitWord64AtomicBinaryOperation( \
2445 node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
2446 kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
2447 }
2448 VISIT_ATOMIC_BINOP(Add)
2449 VISIT_ATOMIC_BINOP(Sub)
2450 VISIT_ATOMIC_BINOP(And)
2451 VISIT_ATOMIC_BINOP(Or)
2452 VISIT_ATOMIC_BINOP(Xor)
2453 #undef VISIT_ATOMIC_BINOP
2454
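// The macros below generate the boilerplate visitors for the SIMD splat,
// lane, shift, unary and binary operations; each visitor emits a single
// x64 instruction code.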
2455 #define SIMD_TYPES(V) \
2456 V(F32x4) \
2457 V(I32x4) \
2458 V(I16x8) \
2459 V(I8x16)
2460
2461 #define SIMD_BINOP_LIST(V) \
2462 V(F32x4Add) \
2463 V(F32x4AddHoriz) \
2464 V(F32x4Sub) \
2465 V(F32x4Mul) \
2466 V(F32x4Min) \
2467 V(F32x4Max) \
2468 V(F32x4Eq) \
2469 V(F32x4Ne) \
2470 V(F32x4Lt) \
2471 V(F32x4Le) \
2472 V(I32x4Add) \
2473 V(I32x4AddHoriz) \
2474 V(I32x4Sub) \
2475 V(I32x4Mul) \
2476 V(I32x4MinS) \
2477 V(I32x4MaxS) \
2478 V(I32x4Eq) \
2479 V(I32x4Ne) \
2480 V(I32x4GtS) \
2481 V(I32x4GeS) \
2482 V(I32x4MinU) \
2483 V(I32x4MaxU) \
2484 V(I32x4GtU) \
2485 V(I32x4GeU) \
2486 V(I16x8Add) \
2487 V(I16x8AddSaturateS) \
2488 V(I16x8AddHoriz) \
2489 V(I16x8Sub) \
2490 V(I16x8SubSaturateS) \
2491 V(I16x8Mul) \
2492 V(I16x8MinS) \
2493 V(I16x8MaxS) \
2494 V(I16x8Eq) \
2495 V(I16x8Ne) \
2496 V(I16x8GtS) \
2497 V(I16x8GeS) \
2498 V(I16x8AddSaturateU) \
2499 V(I16x8SubSaturateU) \
2500 V(I16x8MinU) \
2501 V(I16x8MaxU) \
2502 V(I16x8GtU) \
2503 V(I16x8GeU) \
2504 V(I8x16Add) \
2505 V(I8x16AddSaturateS) \
2506 V(I8x16Sub) \
2507 V(I8x16SubSaturateS) \
2508 V(I8x16MinS) \
2509 V(I8x16MaxS) \
2510 V(I8x16Eq) \
2511 V(I8x16Ne) \
2512 V(I8x16GtS) \
2513 V(I8x16GeS) \
2514 V(I8x16AddSaturateU) \
2515 V(I8x16SubSaturateU) \
2516 V(I8x16MinU) \
2517 V(I8x16MaxU) \
2518 V(I8x16GtU) \
2519 V(I8x16GeU) \
2520 V(S128And) \
2521 V(S128Or) \
2522 V(S128Xor)
2523
2524 #define SIMD_UNOP_LIST(V) \
2525 V(F32x4Abs) \
2526 V(F32x4Neg) \
2527 V(F32x4RecipApprox) \
2528 V(F32x4RecipSqrtApprox) \
2529 V(I32x4Neg) \
2530 V(I16x8Neg) \
2531 V(I8x16Neg) \
2532 V(S128Not)
2533
2534 #define SIMD_SHIFT_OPCODES(V) \
2535 V(I32x4Shl) \
2536 V(I32x4ShrS) \
2537 V(I32x4ShrU) \
2538 V(I16x8Shl) \
2539 V(I16x8ShrS) \
2540 V(I16x8ShrU)
2541
2542 void InstructionSelector::VisitS128Zero(Node* node) {
2543 X64OperandGenerator g(this);
2544 Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
2545 }
2546
2547 #define VISIT_SIMD_SPLAT(Type) \
2548 void InstructionSelector::Visit##Type##Splat(Node* node) { \
2549 X64OperandGenerator g(this); \
2550 Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
2551 g.Use(node->InputAt(0))); \
2552 }
2553 SIMD_TYPES(VISIT_SIMD_SPLAT)
2554 #undef VISIT_SIMD_SPLAT
2555
2556 #define VISIT_SIMD_EXTRACT_LANE(Type) \
2557 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
2558 X64OperandGenerator g(this); \
2559 int32_t lane = OpParameter<int32_t>(node->op()); \
2560 Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \
2561 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
2562 }
2563 SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
2564 #undef VISIT_SIMD_EXTRACT_LANE
2565
2566 #define VISIT_SIMD_REPLACE_LANE(Type) \
2567 void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
2568 X64OperandGenerator g(this); \
2569 int32_t lane = OpParameter<int32_t>(node->op()); \
2570 Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
2571 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
2572 g.Use(node->InputAt(1))); \
2573 }
2574 SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
2575 #undef VISIT_SIMD_REPLACE_LANE
2576
2577 #define VISIT_SIMD_SHIFT(Opcode) \
2578 void InstructionSelector::Visit##Opcode(Node* node) { \
2579 X64OperandGenerator g(this); \
2580 int32_t value = OpParameter<int32_t>(node->op()); \
2581 Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
2582 g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
2583 }
2584 SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
2585 #undef VISIT_SIMD_SHIFT
2586
2587 #define VISIT_SIMD_UNOP(Opcode) \
2588 void InstructionSelector::Visit##Opcode(Node* node) { \
2589 X64OperandGenerator g(this); \
2590 Emit(kX64##Opcode, g.DefineAsRegister(node), \
2591 g.UseRegister(node->InputAt(0))); \
2592 }
2593 SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
2594 #undef VISIT_SIMD_UNOP
2595
2596 #define VISIT_SIMD_BINOP(Opcode) \
2597 void InstructionSelector::Visit##Opcode(Node* node) { \
2598 X64OperandGenerator g(this); \
2599 Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
2600 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
2601 }
2602 SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
2603 #undef VISIT_SIMD_BINOP
2604 #undef SIMD_TYPES
2605 #undef SIMD_BINOP_LIST
2606 #undef SIMD_UNOP_LIST
2607 #undef SIMD_SHIFT_OPCODES
2608
2609 void InstructionSelector::VisitS128Select(Node* node) {
2610 X64OperandGenerator g(this);
2611 Emit(kX64S128Select, g.DefineSameAsFirst(node),
2612 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
2613 g.UseRegister(node->InputAt(2)));
2614 }
2615
2616 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
2617 UNREACHABLE();
2618 }
2619
2620 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
2621 UNREACHABLE();
2622 }
2623
2624 // static
2625 MachineOperatorBuilder::Flags
2626 InstructionSelector::SupportedMachineOperatorFlags() {
2627 MachineOperatorBuilder::Flags flags =
2628 MachineOperatorBuilder::kWord32ShiftIsSafe |
2629 MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
2630 MachineOperatorBuilder::kSpeculationFence;
2631 if (CpuFeatures::IsSupported(POPCNT)) {
2632 flags |= MachineOperatorBuilder::kWord32Popcnt |
2633 MachineOperatorBuilder::kWord64Popcnt;
2634 }
2635 if (CpuFeatures::IsSupported(SSE4_1)) {
2636 flags |= MachineOperatorBuilder::kFloat32RoundDown |
2637 MachineOperatorBuilder::kFloat64RoundDown |
2638 MachineOperatorBuilder::kFloat32RoundUp |
2639 MachineOperatorBuilder::kFloat64RoundUp |
2640 MachineOperatorBuilder::kFloat32RoundTruncate |
2641 MachineOperatorBuilder::kFloat64RoundTruncate |
2642 MachineOperatorBuilder::kFloat32RoundTiesEven |
2643 MachineOperatorBuilder::kFloat64RoundTiesEven;
2644 }
2645 return flags;
2646 }
2647
2648 // static
2649 MachineOperatorBuilder::AlignmentRequirements
2650 InstructionSelector::AlignmentRequirements() {
2651 return MachineOperatorBuilder::AlignmentRequirements::
2652 FullUnalignedAccessSupport();
2653 }
2654
2655 } // namespace compiler
2656 } // namespace internal
2657 } // namespace v8
2658