• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 // Adds IA32-specific methods for generating operands.
class IA32OperandGenerator final : public OperandGenerator {
 public:
  explicit IA32OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Use operand that must live in a byte-addressable register. For now it
  // is simply pinned to edx until proper constraints are encoded.
  InstructionOperand UseByteRegister(Node* node) {
    // TODO(titzer): encode byte register use constraints.
    return UseFixed(node, edx);
  }

  // Define operand that should be a byte-addressable register; currently a
  // plain register define (see TODO).
  InstructionOperand DefineAsByteRegister(Node* node) {
    // TODO(titzer): encode byte register def constraints.
    return DefineAsRegister(node);
  }

  // Returns true if the load |input| may be folded into |node| as a memory
  // operand of |opcode|: the load must be covered by |node|, sit at the same
  // effect level (no intervening effects), and have a representation that
  // matches the operand width the compare/test opcode expects.
  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
                          int effect_level) {
    if (input->opcode() != IrOpcode::kLoad ||
        !selector()->CanCover(node, input)) {
      return false;
    }
    if (effect_level != selector()->GetEffectLevel(input)) {
      return false;
    }
    MachineRepresentation rep =
        LoadRepresentationOf(input->op()).representation();
    switch (opcode) {
      case kIA32Cmp:
      case kIA32Test:
        return rep == MachineRepresentation::kWord32 ||
               rep == MachineRepresentation::kTagged;
      case kIA32Cmp16:
      case kIA32Test16:
        return rep == MachineRepresentation::kWord16;
      case kIA32Cmp8:
      case kIA32Test8:
        return rep == MachineRepresentation::kWord8;
      default:
        break;
    }
    return false;
  }

  // Returns true if |node| is a constant that can be encoded as an
  // immediate operand.
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
      case IrOpcode::kExternalConstant:
      case IrOpcode::kRelocatableInt32Constant:
      case IrOpcode::kRelocatableInt64Constant:
        return true;
      case IrOpcode::kHeapConstant: {
// TODO(bmeurer): We must not dereference handles concurrently. If we
// really have to this here, then we need to find a way to put this
// information on the HeapConstant node already.
#if 0
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new generation.
        Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
        Isolate* isolate = value->GetIsolate();
        return !isolate->heap()->InNewSpace(*value);
#endif
      }
        // While the check above is compiled out, heap constants fall through
        // to the default case and are never used as immediates.
      default:
        return false;
    }
  }

  // Fills |inputs| with the operands of a
  // [base + index * 2^scale + displacement] access and returns the matching
  // addressing mode. A constant base is folded into the displacement;
  // kNegativeDisplacement negates the displacement first.
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
                                             Node* displacement_node,
                                             DisplacementMode displacement_mode,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    int32_t displacement = (displacement_node == nullptr)
                               ? 0
                               : OpParameter<int32_t>(displacement_node);
    if (displacement_mode == kNegativeDisplacement) {
      displacement = -displacement;
    }
    // Fold a constant base into the displacement.
    if (base != nullptr) {
      if (base->opcode() == IrOpcode::kInt32Constant) {
        displacement += OpParameter<int32_t>(base);
        base = nullptr;
      }
    }
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale >= 0 && scale <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != 0) {
          inputs[(*input_count)++] = TempImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale];
        }
      } else {
        if (displacement == 0) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = TempImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      // No base register. Note that the scale-0 table entries reuse the
      // base-register encodings (kMode_MR/kMode_MRI), treating the index as
      // the base.
      DCHECK(scale >= 0 && scale <= 3);
      if (index != nullptr) {
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != 0) {
          inputs[(*input_count)++] = TempImmediate(displacement);
          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                      kMode_M4I, kMode_M8I};
          mode = kMnI_modes[scale];
        } else {
          static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
                                                     kMode_M4, kMode_M8};
          mode = kMn_modes[scale];
        }
      } else {
        // Neither base nor index: absolute displacement only.
        inputs[(*input_count)++] = TempImmediate(displacement);
        return kMode_MI;
      }
    }
    return mode;
  }

  // Decomposes the effective address computed by |node| into instruction
  // inputs. Falls back to a plain base+index (MR1) form when the matched
  // displacement cannot be encoded as an immediate.
  AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
    DCHECK(m.matches());
    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
      return GenerateMemoryOperandInputs(
          m.index(), m.scale(), m.base(), m.displacement(),
          m.displacement_mode(), inputs, input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(node->InputAt(0));
      inputs[(*input_count)++] = UseRegister(node->InputAt(1));
      return kMode_MR1;
    }
  }

  // A node that is no longer live may be clobbered, which makes it the
  // better choice for the overwritten ("same as first") left operand of a
  // two-address instruction.
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};
166 
167 
168 namespace {
169 
VisitRO(InstructionSelector * selector,Node * node,ArchOpcode opcode)170 void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
171   IA32OperandGenerator g(selector);
172   selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
173 }
174 
175 
VisitRR(InstructionSelector * selector,Node * node,InstructionCode opcode)176 void VisitRR(InstructionSelector* selector, Node* node,
177              InstructionCode opcode) {
178   IA32OperandGenerator g(selector);
179   selector->Emit(opcode, g.DefineAsRegister(node),
180                  g.UseRegister(node->InputAt(0)));
181 }
182 
183 
VisitRROFloat(InstructionSelector * selector,Node * node,ArchOpcode avx_opcode,ArchOpcode sse_opcode)184 void VisitRROFloat(InstructionSelector* selector, Node* node,
185                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
186   IA32OperandGenerator g(selector);
187   InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
188   InstructionOperand operand1 = g.Use(node->InputAt(1));
189   if (selector->IsSupported(AVX)) {
190     selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
191   } else {
192     selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
193   }
194 }
195 
196 
VisitFloatUnop(InstructionSelector * selector,Node * node,Node * input,ArchOpcode avx_opcode,ArchOpcode sse_opcode)197 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
198                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
199   IA32OperandGenerator g(selector);
200   if (selector->IsSupported(AVX)) {
201     selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
202   } else {
203     selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
204   }
205 }
206 
207 
208 }  // namespace
209 
210 
// Visits a Load node: selects the mov variant matching the loaded
// representation and folds the effective address into the instruction's
// addressing mode.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kIA32Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kIA32Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sub-word loads sign- or zero-extend into a full 32-bit register.
      opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kIA32Movl;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      // Not representable as a single mov here.
      UNREACHABLE();
      return;
  }

  IA32OperandGenerator g(this);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}
252 
// Protected (trap-handler) loads are not implemented for ia32 yet.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
257 
// Visits a Store node with inputs (base, index, value). Stores that need a
// write barrier are emitted as kArchStoreWithWriteBarrier with unique
// register constraints; plain stores pick a width-specific mov and fold the
// effective address into the instruction.
void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    // Only tagged pointers can require a write barrier.
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // NOTE(review): all inputs are unique registers, presumably because the
    // barrier code sequence reads them after other values may have been
    // clobbered -- confirm against the code generator.
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    // Translate the barrier kind into the RecordWriteMode carried in
    // MiscField.
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // Plain store: select the mov for the stored representation.
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kIA32Movss;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kIA32Movsd;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kIA32Movb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kIA32Movw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kIA32Movl;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    // Byte stores need a byte-addressable register unless the value is an
    // immediate.
    InstructionOperand val;
    if (g.CanBeImmediate(value)) {
      val = g.UseImmediate(value);
    } else if (rep == MachineRepresentation::kWord8 ||
               rep == MachineRepresentation::kBit) {
      val = g.UseByteRegister(value);
    } else {
      val = g.UseRegister(value);
    }

    // Address operands first, stored value last.
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    inputs[input_count++] = val;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
353 
// Architecture supports unaligned access, therefore VisitLoad is used instead
// and this visitor must never be reached.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
356 
// Architecture supports unaligned access, therefore VisitStore is used instead
// and this visitor must never be reached.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
359 
// Visits a CheckedLoad (bounds-checked access) with inputs
// (buffer, offset, length). The emitted instruction receives the offset
// operand twice: once for the bounds check against |length| and once as
// part of the address.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  IA32OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:         // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  // Constant buffers become an immediate base (MRI); otherwise the buffer
  // lives in a register (MR1).
  if (g.CanBeImmediate(buffer)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), offset_operand, length_operand,
         offset_operand, g.UseImmediate(buffer));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MR1),
         g.DefineAsRegister(node), offset_operand, length_operand,
         g.UseRegister(buffer), offset_operand);
  }
}
406 
407 
// Visits a CheckedStore (bounds-checked store) with inputs
// (buffer, offset, length, value). Mirrors VisitCheckedLoad: the offset
// operand appears twice (bounds check and address), and the value follows
// the same byte-register rule as plain stores.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  IA32OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:         // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // Immediate if possible; byte stores otherwise need a byte-addressable
  // register.
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value)
                              : ((rep == MachineRepresentation::kWord8 ||
                                  rep == MachineRepresentation::kBit)
                                     ? g.UseByteRegister(value)
                                     : g.UseRegister(value));
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  // Constant buffers become an immediate base (MRI); otherwise MR1.
  if (g.CanBeImmediate(buffer)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         offset_operand, length_operand, value_operand, offset_operand,
         g.UseImmediate(buffer));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
         offset_operand, length_operand, value_operand, g.UseRegister(buffer),
         offset_operand);
  }
}
461 
462 namespace {
463 
// Shared routine for multiple binary operations.
//
// Operand selection for a two-address instruction: identical inputs share a
// single register; an immediate right operand is encoded directly; otherwise
// a commutative operation may swap its inputs so that a non-live node
// becomes the overwritten left operand. The flags continuation appends
// branch labels, adds a byte-register flag output, or redirects the emit to
// a deoptimization.
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov eax, [ebp-0x10]
    //   add eax, [ebp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // Prefer a non-live node as the (clobbered) left operand.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  // The result always overwrites the first input.
  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsByteRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
523 
524 
525 // Shared routine for multiple binary operations.
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode)526 void VisitBinop(InstructionSelector* selector, Node* node,
527                 InstructionCode opcode) {
528   FlagsContinuation cont;
529   VisitBinop(selector, node, opcode, &cont);
530 }
531 
532 }  // namespace
533 
// Bitwise AND shares the generic two-address binop path.
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kIA32And);
}
537 
538 
// Bitwise OR shares the generic two-address binop path.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kIA32Or);
}
542 
543 
VisitWord32Xor(Node * node)544 void InstructionSelector::VisitWord32Xor(Node* node) {
545   IA32OperandGenerator g(this);
546   Int32BinopMatcher m(node);
547   if (m.right().Is(-1)) {
548     Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
549   } else {
550     VisitBinop(this, node, kIA32Xor);
551   }
552 }
553 
554 
555 // Shared routine for multiple shift operations.
VisitShift(InstructionSelector * selector,Node * node,ArchOpcode opcode)556 static inline void VisitShift(InstructionSelector* selector, Node* node,
557                               ArchOpcode opcode) {
558   IA32OperandGenerator g(selector);
559   Node* left = node->InputAt(0);
560   Node* right = node->InputAt(1);
561 
562   if (g.CanBeImmediate(right)) {
563     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
564                    g.UseImmediate(right));
565   } else {
566     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
567                    g.UseFixed(right, ecx));
568   }
569 }
570 
571 
572 namespace {
573 
VisitMulHigh(InstructionSelector * selector,Node * node,ArchOpcode opcode)574 void VisitMulHigh(InstructionSelector* selector, Node* node,
575                   ArchOpcode opcode) {
576   IA32OperandGenerator g(selector);
577   InstructionOperand temps[] = {g.TempRegister(eax)};
578   selector->Emit(
579       opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
580       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
581 }
582 
583 
VisitDiv(InstructionSelector * selector,Node * node,ArchOpcode opcode)584 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
585   IA32OperandGenerator g(selector);
586   InstructionOperand temps[] = {g.TempRegister(edx)};
587   selector->Emit(opcode, g.DefineAsFixed(node, eax),
588                  g.UseFixed(node->InputAt(0), eax),
589                  g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
590 }
591 
592 
VisitMod(InstructionSelector * selector,Node * node,ArchOpcode opcode)593 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
594   IA32OperandGenerator g(selector);
595   InstructionOperand temps[] = {g.TempRegister(eax)};
596   selector->Emit(opcode, g.DefineAsFixed(node, edx),
597                  g.UseFixed(node->InputAt(0), eax),
598                  g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
599 }
600 
EmitLea(InstructionSelector * selector,Node * result,Node * index,int scale,Node * base,Node * displacement,DisplacementMode displacement_mode)601 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
602              int scale, Node* base, Node* displacement,
603              DisplacementMode displacement_mode) {
604   IA32OperandGenerator g(selector);
605   InstructionOperand inputs[4];
606   size_t input_count = 0;
607   AddressingMode mode =
608       g.GenerateMemoryOperandInputs(index, scale, base, displacement,
609                                     displacement_mode, inputs, &input_count);
610 
611   DCHECK_NE(0u, input_count);
612   DCHECK_GE(arraysize(inputs), input_count);
613 
614   InstructionOperand outputs[1];
615   outputs[0] = g.DefineAsRegister(result);
616 
617   InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
618 
619   selector->Emit(opcode, 1, outputs, input_count, inputs);
620 }
621 
622 }  // namespace
623 
624 
VisitWord32Shl(Node * node)625 void InstructionSelector::VisitWord32Shl(Node* node) {
626   Int32ScaleMatcher m(node, true);
627   if (m.matches()) {
628     Node* index = node->InputAt(0);
629     Node* base = m.power_of_two_plus_one() ? index : nullptr;
630     EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
631     return;
632   }
633   VisitShift(this, node, kIA32Shl);
634 }
635 
636 
// Logical shift-right shares the generic shift emission path.
void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitShift(this, node, kIA32Shr);
}
640 
641 
// Arithmetic shift-right shares the generic shift emission path.
void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, kIA32Sar);
}
645 
// Visits Int32PairAdd (64-bit add over two 32-bit halves). If the high-word
// projection is used, emits kIA32AddPair over all four halves; otherwise a
// plain 32-bit add of the low words suffices.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  IA32OperandGenerator g(this);

  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // We use UseUniqueRegister here to avoid register sharing with the temp
    // register.
    InstructionOperand inputs[] = {
        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
                                    g.DefineAsRegister(projection1)};

    InstructionOperand temps[] = {g.TempRegister()};

    Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    Emit(kIA32Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
         g.Use(node->InputAt(2)));
  }
}
670 
// Visits Int32PairSub (64-bit subtract over two 32-bit halves); mirrors
// VisitInt32PairAdd.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  IA32OperandGenerator g(this);

  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // We use UseUniqueRegister here to avoid register sharing with the temp
    // register.
    InstructionOperand inputs[] = {
        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
                                    g.DefineAsRegister(projection1)};

    InstructionOperand temps[] = {g.TempRegister()};

    Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    Emit(kIA32Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
         g.Use(node->InputAt(2)));
  }
}
695 
VisitInt32PairMul(Node * node)696 void InstructionSelector::VisitInt32PairMul(Node* node) {
697   IA32OperandGenerator g(this);
698 
699   Node* projection1 = NodeProperties::FindProjection(node, 1);
700   if (projection1) {
701     // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
702     // register and one mov instruction.
703     InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
704                                    g.UseUnique(node->InputAt(1)),
705                                    g.UseUniqueRegister(node->InputAt(2)),
706                                    g.UseFixed(node->InputAt(3), ecx)};
707 
708     InstructionOperand outputs[] = {
709         g.DefineAsFixed(node, eax),
710         g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
711 
712     InstructionOperand temps[] = {g.TempRegister(edx)};
713 
714     Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
715   } else {
716     // The high word of the result is not used, so we emit the standard 32 bit
717     // instruction.
718     Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
719          g.Use(node->InputAt(2)));
720   }
721 }
722 
// Shared routine for the three 64-bit pair-shift visitors. The input pair is
// fixed to eax (low) / edx (high) and the shift count is an immediate or
// ecx. If the high-word projection is unused, edx is reserved only as a
// temp instead of being defined as an output.
void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
                          Node* node) {
  IA32OperandGenerator g(selector);

  Node* shift = node->InputAt(2);
  InstructionOperand shift_operand;
  if (g.CanBeImmediate(shift)) {
    shift_operand = g.UseImmediate(shift);
  } else {
    shift_operand = g.UseFixed(shift, ecx);
  }
  InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
                                 g.UseFixed(node->InputAt(1), edx),
                                 shift_operand};

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;
  outputs[output_count++] = g.DefineAsFixed(node, eax);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, edx);
  } else {
    temps[temp_count++] = g.TempRegister(edx);
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
752 
// 64-bit (pair) shift-left; see VisitWord32PairShift.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kIA32ShlPair, node);
}
756 
// 64-bit (pair) logical shift-right; see VisitWord32PairShift.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kIA32ShrPair, node);
}
760 
// 64-bit (pair) arithmetic shift-right; see VisitWord32PairShift.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kIA32SarPair, node);
}
764 
// 32-bit rotate-right, using the shared shift-emission helper.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, kIA32Ror);
}
768 
769 
// Count leading zeros (kIA32Lzcnt); the input may be a register or memory.
void InstructionSelector::VisitWord32Clz(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
774 
775 
// Count trailing zeros (kIA32Tzcnt); the input may be a register or memory.
void InstructionSelector::VisitWord32Ctz(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
780 
781 
// Bit reversal is not selectable on IA32.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
783 
// 64-bit byte swap is not selectable on this 32-bit target.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
785 
// 32-bit byte swap is not selectable here.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
787 
// Population count (kIA32Popcnt); the input may be a register or memory.
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
792 
793 
VisitInt32Add(Node * node)794 void InstructionSelector::VisitInt32Add(Node* node) {
795   IA32OperandGenerator g(this);
796 
797   // Try to match the Add to a lea pattern
798   BaseWithIndexAndDisplacement32Matcher m(node);
799   if (m.matches() &&
800       (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
801     InstructionOperand inputs[4];
802     size_t input_count = 0;
803     AddressingMode mode = g.GenerateMemoryOperandInputs(
804         m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
805         inputs, &input_count);
806 
807     DCHECK_NE(0u, input_count);
808     DCHECK_GE(arraysize(inputs), input_count);
809 
810     InstructionOperand outputs[1];
811     outputs[0] = g.DefineAsRegister(node);
812 
813     InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
814     Emit(opcode, 1, outputs, input_count, inputs);
815     return;
816   }
817 
818   // No lea pattern match, use add
819   VisitBinop(this, node, kIA32Add);
820 }
821 
822 
VisitInt32Sub(Node * node)823 void InstructionSelector::VisitInt32Sub(Node* node) {
824   IA32OperandGenerator g(this);
825   Int32BinopMatcher m(node);
826   if (m.left().Is(0)) {
827     Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
828   } else {
829     VisitBinop(this, node, kIA32Sub);
830   }
831 }
832 
833 
VisitInt32Mul(Node * node)834 void InstructionSelector::VisitInt32Mul(Node* node) {
835   Int32ScaleMatcher m(node, true);
836   if (m.matches()) {
837     Node* index = node->InputAt(0);
838     Node* base = m.power_of_two_plus_one() ? index : nullptr;
839     EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
840     return;
841   }
842   IA32OperandGenerator g(this);
843   Node* left = node->InputAt(0);
844   Node* right = node->InputAt(1);
845   if (g.CanBeImmediate(right)) {
846     Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
847          g.UseImmediate(right));
848   } else {
849     if (g.CanBeBetterLeftOperand(right)) {
850       std::swap(left, right);
851     }
852     Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
853          g.Use(right));
854   }
855 }
856 
857 
// Signed high 32 bits of a 32x32->64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kIA32ImulHigh);
}
861 
862 
// Unsigned high 32 bits of a 32x32->64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kIA32UmulHigh);
}
866 
867 
// Signed 32-bit division via the shared div-emission helper.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kIA32Idiv);
}
871 
872 
// Unsigned 32-bit division via the shared div-emission helper.
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kIA32Udiv);
}
876 
877 
// Signed 32-bit modulus; uses the same idiv opcode, remainder output.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kIA32Idiv);
}
881 
882 
// Unsigned 32-bit modulus; uses the same udiv opcode, remainder output.
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kIA32Udiv);
}
886 
887 
// float32 -> float64 widening conversion (register-operand form).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRO(this, node, kSSEFloat32ToFloat64);
}
891 
892 
// int32 -> float32 conversion.
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRO(this, node, kSSEInt32ToFloat32);
}
896 
897 
// uint32 -> float32 conversion. Two temps are reserved for the code
// generator's expansion of this opcode (no single IA32 instruction converts
// unsigned 32-bit values directly).
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}
904 
905 
// int32 -> float64 conversion.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRO(this, node, kSSEInt32ToFloat64);
}
909 
910 
// uint32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRO(this, node, kSSEUint32ToFloat64);
}
914 
915 
// Truncating float32 -> int32 conversion.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRO(this, node, kSSEFloat32ToInt32);
}
919 
920 
// Truncating float32 -> uint32 conversion.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRO(this, node, kSSEFloat32ToUint32);
}
924 
925 
// float64 -> int32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToInt32);
}
929 
930 
// float64 -> uint32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToUint32);
}
934 
// Truncating float64 -> uint32; shares the opcode with the Change variant.
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToUint32);
}
938 
// float64 -> float32 narrowing conversion.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToFloat32);
}
942 
// JavaScript-style double -> int32 truncation (architecture-neutral opcode).
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, node, kArchTruncateDoubleToI);
}
946 
// float64 -> int32; shares the opcode with ChangeFloat64ToInt32.
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToInt32);
}
950 
951 
// Reinterprets float32 bits as int32 (no value conversion).
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
956 
957 
// Reinterprets int32 bits as float32 (no value conversion).
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
962 
963 
// float32 add; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
967 
968 
// float64 add; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
}
972 
973 
// float32 subtract; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
977 
// float64 subtract; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
981 
// float32 multiply; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
985 
986 
// float64 multiply; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}
990 
991 
// float32 divide; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
}
995 
996 
// float64 divide; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
}
1000 
1001 
// float64 modulus. Both inputs are forced into registers and the result is
// tied to the first input; eax is reserved as a scratch register for the
// code generator's expansion of kSSEFloat64Mod.
// NOTE(review): presumably eax holds FPU status during an x87 fprem loop —
// confirm against the IA32 code generator.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(eax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}
1009 
// float32 max; result tied to the first input, one scratch temp reserved.
void InstructionSelector::VisitFloat32Max(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}
1017 
// float64 max; result tied to the first input, one scratch temp reserved.
void InstructionSelector::VisitFloat64Max(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}
1025 
// float32 min; result tied to the first input, one scratch temp reserved.
void InstructionSelector::VisitFloat32Min(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}
1033 
// float64 min; result tied to the first input, one scratch temp reserved.
void InstructionSelector::VisitFloat64Min(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}
1041 
1042 
VisitFloat32Abs(Node * node)1043 void InstructionSelector::VisitFloat32Abs(Node* node) {
1044   IA32OperandGenerator g(this);
1045   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
1046 }
1047 
1048 
VisitFloat64Abs(Node * node)1049 void InstructionSelector::VisitFloat64Abs(Node* node) {
1050   IA32OperandGenerator g(this);
1051   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
1052 }
1053 
// float32 square root.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat32Sqrt);
}
1057 
1058 
// float64 square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat64Sqrt);
}
1062 
1063 
// float32 round toward negative infinity (rounding mode in MiscField).
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
}
1067 
1068 
// float64 round toward negative infinity (rounding mode in MiscField).
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
1072 
1073 
// float32 round toward positive infinity.
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
}
1077 
1078 
// float64 round toward positive infinity.
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
}
1082 
1083 
// float32 round toward zero.
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}
1087 
1088 
// float64 round toward zero.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
1092 
1093 
// Round-half-away-from-zero is not selectable on IA32.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
1097 
1098 
// float32 round to nearest, ties to even.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
}
1102 
1103 
// float64 round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
1107 
// float32 negation; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
}
1111 
// float64 negation; AVX form when available, SSE otherwise.
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
}
1115 
// IEEE-754 two-operand math function (e.g. pow/atan2 family). Marked as a
// call since the instruction expands to a runtime/stub invocation.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  IA32OperandGenerator g(this);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)))
      ->MarkAsCall();
}
1123 
// IEEE-754 one-operand math function. Marked as a call since the
// instruction expands to a runtime/stub invocation.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  IA32OperandGenerator g(this);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
      ->MarkAsCall();
}
1130 
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)1131 void InstructionSelector::EmitPrepareArguments(
1132     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1133     Node* node) {
1134   IA32OperandGenerator g(this);
1135 
1136   // Prepare for C function call.
1137   if (descriptor->IsCFunctionCall()) {
1138     InstructionOperand temps[] = {g.TempRegister()};
1139     size_t const temp_count = arraysize(temps);
1140     Emit(kArchPrepareCallCFunction |
1141              MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
1142          0, nullptr, 0, nullptr, temp_count, temps);
1143 
1144     // Poke any stack arguments.
1145     for (size_t n = 0; n < arguments->size(); ++n) {
1146       PushParameter input = (*arguments)[n];
1147       if (input.node()) {
1148         int const slot = static_cast<int>(n);
1149         InstructionOperand value = g.CanBeImmediate(node)
1150                                        ? g.UseImmediate(input.node())
1151                                        : g.UseRegister(input.node());
1152         Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
1153       }
1154     }
1155   } else {
1156     // Push any stack arguments.
1157     for (PushParameter input : base::Reversed(*arguments)) {
1158       // Skip any alignment holes in pushed nodes.
1159       if (input.node() == nullptr) continue;
1160       InstructionOperand value =
1161           g.CanBeImmediate(input.node())
1162               ? g.UseImmediate(input.node())
1163               : IsSupported(ATOM) ||
1164                         sequence()->IsFP(GetVirtualRegister(input.node()))
1165                     ? g.UseRegister(input.node())
1166                     : g.Use(input.node());
1167       if (input.type() == MachineType::Float32()) {
1168         Emit(kIA32PushFloat32, g.NoOutput(), value);
1169       } else if (input.type() == MachineType::Float64()) {
1170         Emit(kIA32PushFloat64, g.NoOutput(), value);
1171       } else {
1172         Emit(kIA32Push, g.NoOutput(), value);
1173       }
1174     }
1175   }
1176 }
1177 
1178 
// Tail-call targets can be encoded as immediates on IA32.
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
1180 
// No extra temps are needed for tail calls from JS functions on IA32.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
1182 
1183 namespace {
1184 
VisitCompareWithMemoryOperand(InstructionSelector * selector,InstructionCode opcode,Node * left,InstructionOperand right,FlagsContinuation * cont)1185 void VisitCompareWithMemoryOperand(InstructionSelector* selector,
1186                                    InstructionCode opcode, Node* left,
1187                                    InstructionOperand right,
1188                                    FlagsContinuation* cont) {
1189   DCHECK(left->opcode() == IrOpcode::kLoad);
1190   IA32OperandGenerator g(selector);
1191   size_t input_count = 0;
1192   InstructionOperand inputs[6];
1193   AddressingMode addressing_mode =
1194       g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
1195   opcode |= AddressingModeField::encode(addressing_mode);
1196   opcode = cont->Encode(opcode);
1197   inputs[input_count++] = right;
1198 
1199   if (cont->IsBranch()) {
1200     inputs[input_count++] = g.Label(cont->true_block());
1201     inputs[input_count++] = g.Label(cont->false_block());
1202     selector->Emit(opcode, 0, nullptr, input_count, inputs);
1203   } else if (cont->IsDeoptimize()) {
1204     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
1205                              cont->reason(), cont->frame_state());
1206   } else {
1207     DCHECK(cont->IsSet());
1208     InstructionOperand output = g.DefineAsRegister(cont->result());
1209     selector->Emit(opcode, 1, &output, input_count, inputs);
1210   }
1211 }
1212 
1213 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,InstructionOperand left,InstructionOperand right,FlagsContinuation * cont)1214 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1215                   InstructionOperand left, InstructionOperand right,
1216                   FlagsContinuation* cont) {
1217   IA32OperandGenerator g(selector);
1218   opcode = cont->Encode(opcode);
1219   if (cont->IsBranch()) {
1220     selector->Emit(opcode, g.NoOutput(), left, right,
1221                    g.Label(cont->true_block()), g.Label(cont->false_block()));
1222   } else if (cont->IsDeoptimize()) {
1223     selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
1224                              cont->frame_state());
1225   } else {
1226     DCHECK(cont->IsSet());
1227     selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
1228   }
1229 }
1230 
1231 
1232 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,Node * left,Node * right,FlagsContinuation * cont,bool commutative)1233 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1234                   Node* left, Node* right, FlagsContinuation* cont,
1235                   bool commutative) {
1236   IA32OperandGenerator g(selector);
1237   if (commutative && g.CanBeBetterLeftOperand(right)) {
1238     std::swap(left, right);
1239   }
1240   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
1241 }
1242 
1243 // Tries to match the size of the given opcode to that of the operands, if
1244 // possible.
// Returns a narrowed (8/16-bit) variant of a cmp/test opcode when both
// operands are loads of the same narrow representation; otherwise returns the
// opcode unchanged. May mutate `cont` to switch the condition to unsigned for
// unsigned narrow operands.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                    Node* right, FlagsContinuation* cont) {
  // Currently, if one of the two operands is not a Load, we don't know what its
  // machine representation is, so we bail out.
  // TODO(epertoso): we can probably get some size information out of immediates
  // and phi nodes.
  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
    return opcode;
  }
  // If the load representations don't match, both operands will be
  // zero/sign-extended to 32bit.
  MachineType left_type = LoadRepresentationOf(left->op());
  MachineType right_type = LoadRepresentationOf(right->op());
  if (left_type == right_type) {
    switch (left_type.representation()) {
      case MachineRepresentation::kBit:
      case MachineRepresentation::kWord8: {
        if (opcode == kIA32Test) return kIA32Test8;
        if (opcode == kIA32Cmp) {
          // Unsigned byte compares must use unsigned flags conditions.
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kIA32Cmp8;
        }
        break;
      }
      case MachineRepresentation::kWord16:
        if (opcode == kIA32Test) return kIA32Test16;
        if (opcode == kIA32Cmp) {
          // Same signedness adjustment as for the byte case above.
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kIA32Cmp16;
        }
        break;
      default:
        break;
    }
  }
  return opcode;
}
1290 
1291 // Shared routine for multiple float32 compare operations (inputs commuted).
// float32 compare. Note the operands are passed commuted (right, left), as
// the header comment above states; the continuation's condition accounts for
// this at the call sites.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
}
1298 
1299 
1300 // Shared routine for multiple float64 compare operations (inputs commuted).
// float64 compare. Operands are passed commuted (right, left), matching the
// float32 variant above.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
1307 
1308 // Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations. Tries, in order:
// a narrowed compare with a memory operand, an immediate on the right, a
// memory operand on the left, and finally a plain register/any compare.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // A narrowed opcode (8/16-bit) may be usable if both operands are loads of
  // the same narrow representation.
  InstructionCode narrowed_opcode =
      TryNarrowOpcodeSize(opcode, left, right, cont);

  // For branches the load may be folded across the branch, so take the
  // effect level at the branch point rather than at this node.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
    // Swapping a non-commutative compare means the condition must be commuted.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
      // TODO(epertoso): we should use `narrowed_opcode' here once we match
      // immediates too.
      return VisitCompareWithMemoryOperand(selector, opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on left side of comparison.
  if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
    // 8-bit compares need the non-memory operand in a byte register.
    bool needs_byte_register =
        narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
    return VisitCompareWithMemoryOperand(
        selector, narrowed_opcode, left,
        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
        cont);
  }

  if (g.CanBeBetterLeftOperand(right)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}
1363 
// Word-compare entry point. Recognizes the stack-check pattern
// Compare(Load(js_stack_limit), LoadStackPointer) and emits the dedicated
// kIA32StackCheck instruction for it; otherwise falls back to a plain cmp.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kIA32StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else if (cont->IsDeoptimize()) {
        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
                                 cont->frame_state());
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kIA32Cmp, cont);
}
1391 
1392 
1393 // Shared routine for word comparison with zero.
// Shared routine for word comparison with zero. Peels off Word32Equal-with-0
// wrappers (negating the continuation each time), then tries to fuse the
// flags-producing node (compares, float compares, overflow projections) with
// the continuation; otherwise emits an explicit compare against 0.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // Float compares use unsigned/unordered conditions because the float
      // compare helpers emit the operands commuted.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Add, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Sub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Imul, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kIA32Test, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  IA32OperandGenerator g(selector);
  VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
}
1483 
1484 }  // namespace
1485 
// Selects a branch by fusing its condition input into a compare-against-zero.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
1491 
// Deoptimizes when the condition (input 0) is non-zero; input 1 is the
// frame state.
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
1497 
VisitDeoptimizeUnless(Node * node)1498 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
1499   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
1500       kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
1501   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1502 }
1503 
VisitSwitch(Node * node,const SwitchInfo & sw)1504 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1505   IA32OperandGenerator g(this);
1506   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1507 
1508   // Emit either ArchTableSwitch or ArchLookupSwitch.
1509   size_t table_space_cost = 4 + sw.value_range;
1510   size_t table_time_cost = 3;
1511   size_t lookup_space_cost = 3 + 2 * sw.case_count;
1512   size_t lookup_time_cost = sw.case_count;
1513   if (sw.case_count > 4 &&
1514       table_space_cost + 3 * table_time_cost <=
1515           lookup_space_cost + 3 * lookup_time_cost &&
1516       sw.min_value > std::numeric_limits<int32_t>::min()) {
1517     InstructionOperand index_operand = value_operand;
1518     if (sw.min_value) {
1519       index_operand = g.TempRegister();
1520       Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
1521            value_operand, g.TempImmediate(-sw.min_value));
1522     }
1523     // Generate a table lookup.
1524     return EmitTableSwitch(sw, index_operand);
1525   }
1526 
1527   // Generate a sequence of conditional jumps.
1528   return EmitLookupSwitch(sw, value_operand);
1529 }
1530 
1531 
VisitWord32Equal(Node * const node)1532 void InstructionSelector::VisitWord32Equal(Node* const node) {
1533   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1534   Int32BinopMatcher m(node);
1535   if (m.right().Is(0)) {
1536     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1537   }
1538   VisitWordCompare(this, node, &cont);
1539 }
1540 
1541 
VisitInt32LessThan(Node * node)1542 void InstructionSelector::VisitInt32LessThan(Node* node) {
1543   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1544   VisitWordCompare(this, node, &cont);
1545 }
1546 
1547 
VisitInt32LessThanOrEqual(Node * node)1548 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1549   FlagsContinuation cont =
1550       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1551   VisitWordCompare(this, node, &cont);
1552 }
1553 
1554 
VisitUint32LessThan(Node * node)1555 void InstructionSelector::VisitUint32LessThan(Node* node) {
1556   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1557   VisitWordCompare(this, node, &cont);
1558 }
1559 
1560 
VisitUint32LessThanOrEqual(Node * node)1561 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1562   FlagsContinuation cont =
1563       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1564   VisitWordCompare(this, node, &cont);
1565 }
1566 
1567 
VisitInt32AddWithOverflow(Node * node)1568 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1569   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1570     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1571     return VisitBinop(this, node, kIA32Add, &cont);
1572   }
1573   FlagsContinuation cont;
1574   VisitBinop(this, node, kIA32Add, &cont);
1575 }
1576 
1577 
VisitInt32SubWithOverflow(Node * node)1578 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1579   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1580     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1581     return VisitBinop(this, node, kIA32Sub, &cont);
1582   }
1583   FlagsContinuation cont;
1584   VisitBinop(this, node, kIA32Sub, &cont);
1585 }
1586 
VisitInt32MulWithOverflow(Node * node)1587 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1588   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1589     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1590     return VisitBinop(this, node, kIA32Imul, &cont);
1591   }
1592   FlagsContinuation cont;
1593   VisitBinop(this, node, kIA32Imul, &cont);
1594 }
1595 
VisitFloat32Equal(Node * node)1596 void InstructionSelector::VisitFloat32Equal(Node* node) {
1597   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
1598   VisitFloat32Compare(this, node, &cont);
1599 }
1600 
1601 
VisitFloat32LessThan(Node * node)1602 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1603   FlagsContinuation cont =
1604       FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
1605   VisitFloat32Compare(this, node, &cont);
1606 }
1607 
1608 
VisitFloat32LessThanOrEqual(Node * node)1609 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1610   FlagsContinuation cont =
1611       FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
1612   VisitFloat32Compare(this, node, &cont);
1613 }
1614 
1615 
VisitFloat64Equal(Node * node)1616 void InstructionSelector::VisitFloat64Equal(Node* node) {
1617   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
1618   VisitFloat64Compare(this, node, &cont);
1619 }
1620 
1621 
VisitFloat64LessThan(Node * node)1622 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1623   FlagsContinuation cont =
1624       FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
1625   VisitFloat64Compare(this, node, &cont);
1626 }
1627 
1628 
VisitFloat64LessThanOrEqual(Node * node)1629 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1630   FlagsContinuation cont =
1631       FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
1632   VisitFloat64Compare(this, node, &cont);
1633 }
1634 
1635 
VisitFloat64ExtractLowWord32(Node * node)1636 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1637   IA32OperandGenerator g(this);
1638   Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
1639        g.Use(node->InputAt(0)));
1640 }
1641 
1642 
VisitFloat64ExtractHighWord32(Node * node)1643 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1644   IA32OperandGenerator g(this);
1645   Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
1646        g.Use(node->InputAt(0)));
1647 }
1648 
1649 
VisitFloat64InsertLowWord32(Node * node)1650 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1651   IA32OperandGenerator g(this);
1652   Node* left = node->InputAt(0);
1653   Node* right = node->InputAt(1);
1654   Float64Matcher mleft(left);
1655   if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
1656     Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
1657     return;
1658   }
1659   Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1660        g.UseRegister(left), g.Use(right));
1661 }
1662 
1663 
VisitFloat64InsertHighWord32(Node * node)1664 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1665   IA32OperandGenerator g(this);
1666   Node* left = node->InputAt(0);
1667   Node* right = node->InputAt(1);
1668   Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1669        g.UseRegister(left), g.Use(right));
1670 }
1671 
VisitFloat64SilenceNaN(Node * node)1672 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1673   IA32OperandGenerator g(this);
1674   Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
1675        g.UseRegister(node->InputAt(0)));
1676 }
1677 
void InstructionSelector::VisitAtomicLoad(Node* node) {
  // An atomic load lowers to a regular load; only word-sized-or-smaller
  // representations are expected here. NOTE(review): this relies on plain
  // IA-32 loads of these widths being atomic — confirm against the port's
  // memory-model assumptions.
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);  // load_rep is only consumed by the DCHECK in release builds.
  VisitLoad(node);
}
1686 
VisitAtomicStore(Node * node)1687 void InstructionSelector::VisitAtomicStore(Node* node) {
1688   IA32OperandGenerator g(this);
1689   Node* base = node->InputAt(0);
1690   Node* index = node->InputAt(1);
1691   Node* value = node->InputAt(2);
1692 
1693   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
1694   ArchOpcode opcode = kArchNop;
1695   switch (rep) {
1696     case MachineRepresentation::kWord8:
1697       opcode = kIA32Xchgb;
1698       break;
1699     case MachineRepresentation::kWord16:
1700       opcode = kIA32Xchgw;
1701       break;
1702     case MachineRepresentation::kWord32:
1703       opcode = kIA32Xchgl;
1704       break;
1705     default:
1706       UNREACHABLE();
1707       break;
1708   }
1709   AddressingMode addressing_mode;
1710   InstructionOperand inputs[4];
1711   size_t input_count = 0;
1712   inputs[input_count++] = g.UseUniqueRegister(base);
1713   if (g.CanBeImmediate(index)) {
1714     inputs[input_count++] = g.UseImmediate(index);
1715     addressing_mode = kMode_MRI;
1716   } else {
1717     inputs[input_count++] = g.UseUniqueRegister(index);
1718     addressing_mode = kMode_MR1;
1719   }
1720   inputs[input_count++] = g.UseUniqueRegister(value);
1721   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1722   Emit(code, 0, nullptr, input_count, inputs);
1723 }
1724 
1725 // static
1726 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()1727 InstructionSelector::SupportedMachineOperatorFlags() {
1728   MachineOperatorBuilder::Flags flags =
1729       MachineOperatorBuilder::kWord32ShiftIsSafe |
1730       MachineOperatorBuilder::kWord32Ctz;
1731   if (CpuFeatures::IsSupported(POPCNT)) {
1732     flags |= MachineOperatorBuilder::kWord32Popcnt;
1733   }
1734   if (CpuFeatures::IsSupported(SSE4_1)) {
1735     flags |= MachineOperatorBuilder::kFloat32RoundDown |
1736              MachineOperatorBuilder::kFloat64RoundDown |
1737              MachineOperatorBuilder::kFloat32RoundUp |
1738              MachineOperatorBuilder::kFloat64RoundUp |
1739              MachineOperatorBuilder::kFloat32RoundTruncate |
1740              MachineOperatorBuilder::kFloat64RoundTruncate |
1741              MachineOperatorBuilder::kFloat32RoundTiesEven |
1742              MachineOperatorBuilder::kFloat64RoundTiesEven;
1743   }
1744   return flags;
1745 }
1746 
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  // This port imposes no alignment restrictions on loads or stores.
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}
1753 
1754 }  // namespace compiler
1755 }  // namespace internal
1756 }  // namespace v8
1757