// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator final : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kRelocatableInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
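        // x64 immediates are 32 bits wide and sign-extended to 64 bits, so a
        // 64-bit constant is only usable if it survives the int32 round trip.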
        const int64_t value = OpParameter<int64_t>(node);
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
      }
      case IrOpcode::kNumberConstant: {
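        // Only +0.0 has an all-zero bit pattern; -0.0 (sign bit set) and any
        // other number do not qualify.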
        const double value = OpParameter<double>(node);
        return bit_cast<int64_t>(value) == 0;
      }
      default:
        return false;
    }
  }

  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
                          int effect_level) {
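    // A load can only be folded into this instruction if the instruction is
    // the load's sole user (CanCover) and no side-effecting operation occurs
    // in between (matching effect levels).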
    if (input->opcode() != IrOpcode::kLoad ||
        !selector()->CanCover(node, input)) {
      return false;
    }
    if (effect_level != selector()->GetEffectLevel(input)) {
      return false;
    }
    MachineRepresentation rep =
        LoadRepresentationOf(input->op()).representation();
    switch (opcode) {
      case kX64Cmp:
      case kX64Test:
        return rep == MachineRepresentation::kWord64 ||
               rep == MachineRepresentation::kTagged;
      case kX64Cmp32:
      case kX64Test32:
        return rep == MachineRepresentation::kWord32;
      case kX64Cmp16:
      case kX64Test16:
        return rep == MachineRepresentation::kWord16;
      case kX64Cmp8:
      case kX64Test8:
        return rep == MachineRepresentation::kWord8;
      default:
        break;
    }
    return false;
  }

  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != nullptr) {
          inputs[(*input_count)++] = UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == nullptr) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK_NOT_NULL(index);
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != nullptr) {
        inputs[(*input_count)++] = UseImmediate(displacement);
        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
      } else {
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }

  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement64Matcher m(operand, true);
    DCHECK(m.matches());
    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    } else {
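      // The displacement cannot be encoded as an immediate, so fall back to a
      // plain [base + index] mode built from the node's two inputs.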
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

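  // A node that is no longer live after this instruction can be overwritten
  // in place, which makes it the better choice for the operand that shares a
  // register with the result.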
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};


void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  X64OperandGenerator g(this);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kX64Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kX64Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Movl;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kX64Movq;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}


void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
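    // Keep all operands in unique registers so they cannot alias each other
    // or the temps used by the write-barrier code below.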
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kX64Movss;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kX64Movsd;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kX64Movb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kX64Movw;
        break;
      case MachineRepresentation::kWord32:
        opcode = kX64Movl;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kX64Movq;
        break;
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand =
        g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
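  // If the offset is "index + constant" with a non-negative constant that is
  // within the constant length, fold the constant into the instruction as an
  // immediate and let the emitted bounds check account for it.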
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           value_operand);
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
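  // Masking with 0xff or 0xffff is exactly a zero-extending 8- or 16-bit move.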
  if (m.right().Is(0xff)) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xffff)) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
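  // x ^ -1 is ~x, which has a dedicated one-operand not instruction.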
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


namespace {

// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

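  // A constant shift count encodes as an immediate; a variable count must be
  // in rcx (cl), the only register x86 shift instructions accept.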
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
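    // 64-bit shifts mask the count to 6 bits in hardware, so an explicit
    // "& 0x3F" on the shift count is redundant and can be stripped.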
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement) {
  X64OperandGenerator g(selector);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode = g.GenerateMemoryOperandInputs(
      index, scale, base, displacement, inputs, &input_count);

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}

}  // namespace


void InstructionSelector::VisitWord32Shl(Node* node) {
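  // A shift left by 1, 2, or 3 is a multiplication by 2, 4, or 8 and can be
  // emitted as a single leal with the matching scale factor.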
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kX64Shl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
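  // (x << 16) >> 16 sign-extends the low half-word and (x << 24) >> 24 the
  // low byte; both collapse into a single sign-extending move.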
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
      m.right().Is(32)) {
    // Just load and sign-extend the interesting 4 bytes instead. This happens,
    // for example, when we're loading and untagging SMIs.
    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
    if (mleft.matches() && (mleft.displacement() == nullptr ||
                            g.CanBeImmediate(mleft.displacement()))) {
      size_t input_count = 0;
      InstructionOperand inputs[3];
      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
          m.left().node(), inputs, &input_count);
      if (mleft.displacement() == nullptr) {
        // Make sure that the addressing mode indicates the presence of an
        // immediate displacement. It seems that we never use M1 and M2, but we
        // handle them here anyways.
        switch (mode) {
          case kMode_MR:
            mode = kMode_MRI;
            break;
          case kMode_MR1:
            mode = kMode_MR1I;
            break;
          case kMode_MR2:
            mode = kMode_MR2I;
            break;
          case kMode_MR4:
            mode = kMode_MR4I;
            break;
          case kMode_MR8:
            mode = kMode_MR8I;
            break;
          case kMode_M1:
            mode = kMode_M1I;
            break;
          case kMode_M2:
            mode = kMode_M2I;
            break;
          case kMode_M4:
            mode = kMode_M4I;
            break;
          case kMode_M8:
            mode = kMode_M8I;
            break;
          case kMode_None:
          case kMode_MRI:
          case kMode_MR1I:
          case kMode_MR2I:
          case kMode_MR4I:
          case kMode_MR8I:
          case kMode_M1I:
          case kMode_M2I:
          case kMode_M4I:
          case kMode_M8I:
            UNREACHABLE();
        }
        inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
      } else {
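        // The last input is already an immediate displacement; increase it by
        // 4 so the movsxlq below reads the upper half of the 64-bit value.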
        ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
        int32_t displacement = sequence()->GetImmediate(op).ToInt32();
        *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
      }
      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
      InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
      Emit(code, 1, outputs, input_count, inputs);
      return;
    }
  }
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ctz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Ctz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord32Popcnt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Popcnt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement());
    return;
  }

  // No leal pattern match, use addl
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add, &cont);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leal" instructions
      // by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-m.right().Value()));
      return;
    }
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub, &cont);
}


namespace {

void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
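  // The one-operand imul/mul forms write their double-width product to
  // rdx:rax, so the high half is taken from rdx and one factor is pinned to
  // rax.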
  InstructionOperand temps[] = {g.TempRegister(rax)};
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);
}


void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
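  // idiv/div expect the dividend in rdx:rax and leave the quotient in rax and
  // the remainder in rdx, which is clobbered and therefore reserved as a temp.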
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
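  // For modulus the roles flip: the remainder lands in rdx and rax, which
  // held the dividend, is the clobbered temp.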
  InstructionOperand temps[] = {g.TempRegister(rax)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

}  // namespace


void InstructionSelector::VisitInt32Mul(Node* node) {
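  // Multiplications by 2, 4, or 8 become a scaled leal; 3, 5, or 9 (2^n + 1)
  // reuse the index as the base, e.g. leal (%r,%r,2) for a multiply by 3.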
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
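  // If anything consumes projection 1 of this node, define a second output
  // that reports whether the truncation succeeded.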
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


namespace {

void VisitRO(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void VisitRR(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void VisitFloatBinop(InstructionSelector* selector, Node* node,
                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
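  // AVX provides non-destructive three-operand forms, so the result may land
  // in any register; the two-operand SSE forms overwrite their first source,
  // hence DefineSameAsFirst.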
  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
  InstructionOperand operand1 = g.Use(node->InputAt(1));
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
  }
}


void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
  }
}

}  // namespace


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToFloat32);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, node, kArchTruncateDoubleToI);
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
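    // Truncating a 64-bit shift right by 32 only needs the upper half, which
    // the shift already leaves in the low 32 bits of the register.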
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToInt32);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  X64OperandGenerator g(this);
  Float32BinopMatcher m(node);
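  // Subtraction from -0.0 is treated as negation, which is just a sign-bit
  // flip.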
  if (m.left().IsMinusZero()) {
    VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
                   kSSEFloat32Neg);
    return;
  }
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}

void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
}


void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
}


void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}


void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat32Sqrt);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
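    // -0.0 - RoundDown(-0.0 - x) computes -floor(-x), which equals ceil(x),
    // so the whole expression folds into a single round-up.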
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
               g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
                   kSSEFloat64Neg);
    return;
  }
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}

void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat64Sqrt);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}

void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  X64OperandGenerator g(this);
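  // These helpers are calls into C code, so the operands are pinned to
  // xmm0/xmm1 per the native calling convention and the instruction is
  // marked as a call.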
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
       g.UseFixed(node->InputAt(1), xmm1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
      ->MarkAsCall();
}

EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)1426 void InstructionSelector::EmitPrepareArguments(
1427     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1428     Node* node) {
1429   X64OperandGenerator g(this);
1430 
1431   // Prepare for C function call.
1432   if (descriptor->IsCFunctionCall()) {
1433     Emit(kArchPrepareCallCFunction |
1434              MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
1435          0, nullptr, 0, nullptr);
1436 
1437     // Poke any stack arguments.
1438     for (size_t n = 0; n < arguments->size(); ++n) {
1439       PushParameter input = (*arguments)[n];
1440       if (input.node()) {
1441         int slot = static_cast<int>(n);
1442         InstructionOperand value = g.CanBeImmediate(input.node())
1443                                        ? g.UseImmediate(input.node())
1444                                        : g.UseRegister(input.node());
1445         Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
1446       }
1447     }
1448   } else {
1449     // Push any stack arguments.
1450     for (PushParameter input : base::Reversed(*arguments)) {
1451       // TODO(titzer): X64Push cannot handle stack->stack double moves
1452       // because there is no way to encode fixed double slots.
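      // Force a register for FP values (see the TODO above) and on Atom,
      // where a push directly from memory is assumed to be slower;
      // otherwise let the register allocator pick any operand.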
      InstructionOperand value =
          g.CanBeImmediate(input.node())
              ? g.UseImmediate(input.node())
              : IsSupported(ATOM) ||
                        sequence()->IsFP(GetVirtualRegister(input.node()))
                    ? g.UseRegister(input.node())
                    : g.Use(input.node());
      Emit(kX64Push, g.NoOutput(), value);
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return true; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

namespace {

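// Shared routine for comparisons with a memory operand on the left-hand
// side.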
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
                                   InstructionCode opcode, Node* left,
                                   InstructionOperand right,
                                   FlagsContinuation* cont) {
  DCHECK(left->opcode() == IrOpcode::kLoad);
  X64OperandGenerator g(selector);
  size_t input_count = 0;
  InstructionOperand inputs[6];
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  opcode |= AddressingModeField::encode(addressing_mode);
  opcode = cont->Encode(opcode);
  inputs[input_count++] = right;

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    InstructionOperand output = g.DefineAsRegister(cont->result());
    selector->Emit(opcode, 1, &output, input_count, inputs);
  }
}

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  Node* left, Node* right, FlagsContinuation* cont,
                  bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}

// Tries to match the size of the given opcode to that of the operands, if
// possible.
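// For example, a 32-bit compare of two Word8 loads can be narrowed to a
// byte compare (kX64Cmp8).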
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                    Node* right) {
  if (opcode != kX64Cmp32 && opcode != kX64Test32) {
    return opcode;
  }
  // Currently, if one of the two operands is not a Load, we don't know what
  // its machine representation is, so we bail out.
  // TODO(epertoso): we can probably get some size information out of
  // immediates and phi nodes.
  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
    return opcode;
  }
  // If the load representations don't match, both operands will be
  // zero/sign-extended to 32 bits.
  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
  if (left_representation != LoadRepresentationOf(right->op())) {
    return opcode;
  }
  switch (left_representation.representation()) {
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
      return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
    case MachineRepresentation::kWord16:
      return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
    default:
      return opcode;
  }
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  opcode = TryNarrowOpcodeSize(opcode, left, right);

  // If one of the two inputs is an immediate, make sure it's on the right,
  // or if one of the two inputs is a memory operand, make sure it's on the
  // left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
      return VisitCompareWithMemoryOperand(selector, opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on left side of comparison.
  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
    return VisitCompareWithMemoryOperand(selector, opcode, left,
                                         g.UseRegister(right), cont);
  }

  if (g.CanBeBetterLeftOperand(right)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}

// Shared routine for 64-bit word comparison operations.
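// Also recognizes the stack-check pattern
// Compare(Load(js_stack_limit), LoadStackPointer) and lowers it to a single
// kX64StackCheck instruction.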
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kX64StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else if (cont->IsDeoptimize()) {
        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
                                 cont->frame_state());
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kX64Cmp, cont);
}


// Shared routine for comparison with zero.
void VisitCompareZero(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}


// Shared routine for multiple float32 compare operations (inputs commuted).
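// Swapping the operands lets < and <= be tested with the 'above' and
// 'above or equal' conditions, which are false for unordered (NaN) inputs.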
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}


// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

// Shared routine for word comparison against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (selector->CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(selector, value, cont);
              case IrOpcode::kWord64And:
                return VisitWordCompare(selector, value, kX64Test, cont);
              default:
                break;
            }
          }
          return VisitCompareZero(selector, value, kX64Cmp, cont);
        }
        return VisitWord64Compare(selector, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add32, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub32, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kX64Test32, cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kX64Test, cont);
      default:
        break;
    }
    break;
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitCompareZero(selector, value, kX64Cmp32, cont);
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
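  // The constants below are rough space (in instruction slots) and time
  // (in comparisons) estimates for the two strategies.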
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = g.TempRegister();
    if (sw.min_value) {
      // The leal automatically zero-extends, so the result is a valid
      // 64-bit index.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    } else {
      // Zero-extend, because we use it as a 64-bit index into the jump table.
      Emit(kX64Movl, index_operand, value_operand);
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by simply inverting the
    // continuation.
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the continuation with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    // Try to combine the equality check with a comparison.
    Node* const user = m.node();
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWord64Compare(this, value, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
  }
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Float64Matcher mleft(left);
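  // If the high word of the current value is known to be zero, inserting
  // the new low word is equivalent to a zero-extending 32-bit load.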
  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
    Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
    return;
  }
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}

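// On x64, aligned loads of word size or smaller are naturally atomic, and
// the TSO memory model already provides the required acquire semantics, so
// an atomic load compiles to a plain load.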
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);
  VisitLoad(node);
}

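// Atomic stores are implemented with xchg, whose implicit lock prefix makes
// the store both atomic and a full memory barrier.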
void InstructionSelector::VisitAtomicStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kX64Xchgb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kX64Xchgw;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Xchgl;
      break;
    default:
      UNREACHABLE();
      return;
  }
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kFloat32Max |
      MachineOperatorBuilder::kFloat32Min |
      MachineOperatorBuilder::kFloat64Max |
      MachineOperatorBuilder::kFloat64Min |
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
  if (CpuFeatures::IsSupported(POPCNT)) {
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
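  // The roundss/roundsd instructions used by the rounding operators require
  // SSE4.1.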
  if (CpuFeatures::IsSupported(SSE4_1)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8