1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/adapters.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13
14 // Adds IA32-specific methods for generating operands.
class IA32OperandGenerator final : public OperandGenerator {
 public:
  explicit IA32OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns a use operand for |node| constrained to a byte-addressable
  // register (currently pinned to edx).
  InstructionOperand UseByteRegister(Node* node) {
    // TODO(titzer): encode byte register use constraints.
    return UseFixed(node, edx);
  }

  // Returns a definition operand for |node| intended to be a
  // byte-addressable register (constraint not yet encoded, see TODO).
  InstructionOperand DefineAsByteRegister(Node* node) {
    // TODO(titzer): encode byte register def constraints.
    return DefineAsRegister(node);
  }

  // Returns true if |node| can be encoded as an immediate operand on IA-32.
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
      case IrOpcode::kExternalConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new
        // generation.
        Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
        Isolate* isolate = value->GetIsolate();
        return !isolate->heap()->InNewSpace(*value);
      }
      default:
        return false;
    }
  }

  // Appends to |inputs|/|input_count| the operands for the effective address
  // [base + index * 2^scale + displacement] and returns the matching
  // addressing mode.  Any of |base|, |index| and |displacement_node| may be
  // nullptr when that component is absent.  The order in which operands are
  // appended must agree with the returned mode.
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
                                             Node* displacement_node,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    int32_t displacement = (displacement_node == nullptr)
                               ? 0
                               : OpParameter<int32_t>(displacement_node);
    if (base != nullptr) {
      if (base->opcode() == IrOpcode::kInt32Constant) {
        // Fold a constant base into the displacement and drop the base.
        displacement += OpParameter<int32_t>(base);
        base = nullptr;
      }
    }
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale >= 0 && scale <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != 0) {
          inputs[(*input_count)++] = TempImmediate(displacement);
          // Mode tables are indexed by |scale| (0..3 => *1, *2, *4, *8).
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale];
        }
      } else {
        if (displacement == 0) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = TempImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      // No base register: the address is index * 2^scale (+ displacement),
      // or a bare displacement.
      DCHECK(scale >= 0 && scale <= 3);
      if (index != nullptr) {
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != 0) {
          inputs[(*input_count)++] = TempImmediate(displacement);
          // An index scaled by 1 is encoded like a base register, hence the
          // kMode_MRI (rather than a separate "M1I") entry for scale == 0.
          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                      kMode_M4I, kMode_M8I};
          mode = kMnI_modes[scale];
        } else {
          static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
                                                     kMode_M4, kMode_M8};
          mode = kMn_modes[scale];
        }
      } else {
        // Displacement-only address.
        inputs[(*input_count)++] = TempImmediate(displacement);
        return kMode_MI;
      }
    }
    return mode;
  }

  // Decomposes the address computation of the memory access |node| via
  // BaseWithIndexAndDisplacement32Matcher and emits its operands.  Falls back
  // to the plain [reg + reg] form when the matched displacement cannot be
  // encoded as an immediate.
  AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement32Matcher m(node, true);
    DCHECK(m.matches());
    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(node->InputAt(0));
      inputs[(*input_count)++] = UseRegister(node->InputAt(1));
      return kMode_MR1;
    }
  }

  // A node that is still live after this use would be clobbered if chosen as
  // the overwritten left operand of a two-address instruction, so prefer
  // operands that die here.
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};
126
127
128 namespace {
129
VisitRO(InstructionSelector * selector,Node * node,ArchOpcode opcode)130 void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
131 IA32OperandGenerator g(selector);
132 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
133 }
134
135
VisitRR(InstructionSelector * selector,Node * node,InstructionCode opcode)136 void VisitRR(InstructionSelector* selector, Node* node,
137 InstructionCode opcode) {
138 IA32OperandGenerator g(selector);
139 selector->Emit(opcode, g.DefineAsRegister(node),
140 g.UseRegister(node->InputAt(0)));
141 }
142
143
VisitRROFloat(InstructionSelector * selector,Node * node,ArchOpcode avx_opcode,ArchOpcode sse_opcode)144 void VisitRROFloat(InstructionSelector* selector, Node* node,
145 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
146 IA32OperandGenerator g(selector);
147 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
148 InstructionOperand operand1 = g.Use(node->InputAt(1));
149 if (selector->IsSupported(AVX)) {
150 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
151 } else {
152 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
153 }
154 }
155
156
VisitFloatUnop(InstructionSelector * selector,Node * node,Node * input,ArchOpcode avx_opcode,ArchOpcode sse_opcode)157 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
158 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
159 IA32OperandGenerator g(selector);
160 if (selector->IsSupported(AVX)) {
161 selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
162 } else {
163 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
164 }
165 }
166
167
168 } // namespace
169
170
VisitLoad(Node * node)171 void InstructionSelector::VisitLoad(Node* node) {
172 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
173
174 ArchOpcode opcode = kArchNop;
175 switch (load_rep.representation()) {
176 case MachineRepresentation::kFloat32:
177 opcode = kIA32Movss;
178 break;
179 case MachineRepresentation::kFloat64:
180 opcode = kIA32Movsd;
181 break;
182 case MachineRepresentation::kBit: // Fall through.
183 case MachineRepresentation::kWord8:
184 opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
185 break;
186 case MachineRepresentation::kWord16:
187 opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
188 break;
189 case MachineRepresentation::kTagged: // Fall through.
190 case MachineRepresentation::kWord32:
191 opcode = kIA32Movl;
192 break;
193 case MachineRepresentation::kWord64: // Fall through.
194 case MachineRepresentation::kNone:
195 UNREACHABLE();
196 return;
197 }
198
199 IA32OperandGenerator g(this);
200 InstructionOperand outputs[1];
201 outputs[0] = g.DefineAsRegister(node);
202 InstructionOperand inputs[3];
203 size_t input_count = 0;
204 AddressingMode mode =
205 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
206 InstructionCode code = opcode | AddressingModeField::encode(mode);
207 Emit(code, 1, outputs, input_count, inputs);
208 }
209
210
void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    // Barriered stores go through the generic kArchStoreWithWriteBarrier
    // instruction; only tagged values ever need a write barrier.
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    // NOTE(review): map writes use a plain register for the value while all
    // other barriered stores require a unique one — presumably the map value
    // cannot alias the barrier's temps; confirm before changing.
    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                ? g.UseRegister(value)
                                : g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // Plain store: select the move instruction for the representation.
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kIA32Movss;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kIA32Movsd;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kIA32Movb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kIA32Movw;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kIA32Movl;
        break;
      case MachineRepresentation::kWord64:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    InstructionOperand val;
    if (g.CanBeImmediate(value)) {
      val = g.UseImmediate(value);
    } else if (rep == MachineRepresentation::kWord8 ||
               rep == MachineRepresentation::kBit) {
      // 8-bit stores need the value in a byte-addressable register.
      val = g.UseByteRegister(value);
    } else {
      val = g.UseRegister(value);
    }

    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    // The stored value is appended after the address operands.
    inputs[input_count++] = val;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
305
306
VisitCheckedLoad(Node * node)307 void InstructionSelector::VisitCheckedLoad(Node* node) {
308 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
309 IA32OperandGenerator g(this);
310 Node* const buffer = node->InputAt(0);
311 Node* const offset = node->InputAt(1);
312 Node* const length = node->InputAt(2);
313 ArchOpcode opcode = kArchNop;
314 switch (load_rep.representation()) {
315 case MachineRepresentation::kWord8:
316 opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
317 break;
318 case MachineRepresentation::kWord16:
319 opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
320 break;
321 case MachineRepresentation::kWord32:
322 opcode = kCheckedLoadWord32;
323 break;
324 case MachineRepresentation::kFloat32:
325 opcode = kCheckedLoadFloat32;
326 break;
327 case MachineRepresentation::kFloat64:
328 opcode = kCheckedLoadFloat64;
329 break;
330 case MachineRepresentation::kBit: // Fall through.
331 case MachineRepresentation::kTagged: // Fall through.
332 case MachineRepresentation::kWord64: // Fall through.
333 case MachineRepresentation::kNone:
334 UNREACHABLE();
335 return;
336 }
337 InstructionOperand offset_operand = g.UseRegister(offset);
338 InstructionOperand length_operand =
339 g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
340 if (g.CanBeImmediate(buffer)) {
341 Emit(opcode | AddressingModeField::encode(kMode_MRI),
342 g.DefineAsRegister(node), offset_operand, length_operand,
343 offset_operand, g.UseImmediate(buffer));
344 } else {
345 Emit(opcode | AddressingModeField::encode(kMode_MR1),
346 g.DefineAsRegister(node), offset_operand, length_operand,
347 g.UseRegister(buffer), offset_operand);
348 }
349 }
350
351
void InstructionSelector::VisitCheckedStore(Node* node) {
  // Bounds-checked store: inputs are buffer, offset, length and value.
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  IA32OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:     // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // The stored value: an immediate when encodable; a byte-addressable
  // register for 8-bit stores; otherwise any register.
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value)
                              : ((rep == MachineRepresentation::kWord8 ||
                                  rep == MachineRepresentation::kBit)
                                     ? g.UseByteRegister(value)
                                     : g.UseRegister(value));
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  // A constant buffer address becomes the displacement (MRI); otherwise the
  // buffer acts as the base register (MR1).
  if (g.CanBeImmediate(buffer)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         offset_operand, length_operand, value_operand, offset_operand,
         g.UseImmediate(buffer));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
         offset_operand, length_operand, value_operand, g.UseRegister(buffer),
         offset_operand);
  }
}
402
403
404 // Shared routine for multiple binary operations.
// Shared routine for multiple binary operations.  Emits a two-address
// instruction (result same-as-first), optionally consuming a flags
// continuation: branch targets are appended as inputs, a materialized flag
// result is added as a second (byte-register) output.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov eax, [ebp-0x10]
    //   add eax, [ebp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // For commutative operators, prefer a left operand that dies here, since
    // the two-address instruction overwrites it.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  // Branch continuations consume the true/false targets as extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  // A materialized flag result needs a byte-addressable register.
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsByteRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}
458
459
460 // Shared routine for multiple binary operations.
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode)461 static void VisitBinop(InstructionSelector* selector, Node* node,
462 InstructionCode opcode) {
463 FlagsContinuation cont;
464 VisitBinop(selector, node, opcode, &cont);
465 }
466
467
VisitWord32And(Node * node)468 void InstructionSelector::VisitWord32And(Node* node) {
469 VisitBinop(this, node, kIA32And);
470 }
471
472
VisitWord32Or(Node * node)473 void InstructionSelector::VisitWord32Or(Node* node) {
474 VisitBinop(this, node, kIA32Or);
475 }
476
477
VisitWord32Xor(Node * node)478 void InstructionSelector::VisitWord32Xor(Node* node) {
479 IA32OperandGenerator g(this);
480 Int32BinopMatcher m(node);
481 if (m.right().Is(-1)) {
482 Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
483 } else {
484 VisitBinop(this, node, kIA32Xor);
485 }
486 }
487
488
489 // Shared routine for multiple shift operations.
VisitShift(InstructionSelector * selector,Node * node,ArchOpcode opcode)490 static inline void VisitShift(InstructionSelector* selector, Node* node,
491 ArchOpcode opcode) {
492 IA32OperandGenerator g(selector);
493 Node* left = node->InputAt(0);
494 Node* right = node->InputAt(1);
495
496 if (g.CanBeImmediate(right)) {
497 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
498 g.UseImmediate(right));
499 } else {
500 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
501 g.UseFixed(right, ecx));
502 }
503 }
504
505
506 namespace {
507
VisitMulHigh(InstructionSelector * selector,Node * node,ArchOpcode opcode)508 void VisitMulHigh(InstructionSelector* selector, Node* node,
509 ArchOpcode opcode) {
510 IA32OperandGenerator g(selector);
511 selector->Emit(opcode, g.DefineAsFixed(node, edx),
512 g.UseFixed(node->InputAt(0), eax),
513 g.UseUniqueRegister(node->InputAt(1)));
514 }
515
516
VisitDiv(InstructionSelector * selector,Node * node,ArchOpcode opcode)517 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
518 IA32OperandGenerator g(selector);
519 InstructionOperand temps[] = {g.TempRegister(edx)};
520 selector->Emit(opcode, g.DefineAsFixed(node, eax),
521 g.UseFixed(node->InputAt(0), eax),
522 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
523 }
524
525
VisitMod(InstructionSelector * selector,Node * node,ArchOpcode opcode)526 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
527 IA32OperandGenerator g(selector);
528 selector->Emit(opcode, g.DefineAsFixed(node, edx),
529 g.UseFixed(node->InputAt(0), eax),
530 g.UseUnique(node->InputAt(1)));
531 }
532
EmitLea(InstructionSelector * selector,Node * result,Node * index,int scale,Node * base,Node * displacement)533 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
534 int scale, Node* base, Node* displacement) {
535 IA32OperandGenerator g(selector);
536 InstructionOperand inputs[4];
537 size_t input_count = 0;
538 AddressingMode mode = g.GenerateMemoryOperandInputs(
539 index, scale, base, displacement, inputs, &input_count);
540
541 DCHECK_NE(0u, input_count);
542 DCHECK_GE(arraysize(inputs), input_count);
543
544 InstructionOperand outputs[1];
545 outputs[0] = g.DefineAsRegister(result);
546
547 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
548
549 selector->Emit(opcode, 1, outputs, input_count, inputs);
550 }
551
552 } // namespace
553
554
VisitWord32Shl(Node * node)555 void InstructionSelector::VisitWord32Shl(Node* node) {
556 Int32ScaleMatcher m(node, true);
557 if (m.matches()) {
558 Node* index = node->InputAt(0);
559 Node* base = m.power_of_two_plus_one() ? index : nullptr;
560 EmitLea(this, node, index, m.scale(), base, nullptr);
561 return;
562 }
563 VisitShift(this, node, kIA32Shl);
564 }
565
566
VisitWord32Shr(Node * node)567 void InstructionSelector::VisitWord32Shr(Node* node) {
568 VisitShift(this, node, kIA32Shr);
569 }
570
571
VisitWord32Sar(Node * node)572 void InstructionSelector::VisitWord32Sar(Node* node) {
573 VisitShift(this, node, kIA32Sar);
574 }
575
576
VisitWord32Ror(Node * node)577 void InstructionSelector::VisitWord32Ror(Node* node) {
578 VisitShift(this, node, kIA32Ror);
579 }
580
581
VisitWord32Clz(Node * node)582 void InstructionSelector::VisitWord32Clz(Node* node) {
583 IA32OperandGenerator g(this);
584 Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
585 }
586
587
VisitWord32Ctz(Node * node)588 void InstructionSelector::VisitWord32Ctz(Node* node) {
589 IA32OperandGenerator g(this);
590 Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
591 }
592
593
VisitWord32Popcnt(Node * node)594 void InstructionSelector::VisitWord32Popcnt(Node* node) {
595 IA32OperandGenerator g(this);
596 Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
597 }
598
599
VisitInt32Add(Node * node)600 void InstructionSelector::VisitInt32Add(Node* node) {
601 IA32OperandGenerator g(this);
602
603 // Try to match the Add to a lea pattern
604 BaseWithIndexAndDisplacement32Matcher m(node);
605 if (m.matches() &&
606 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
607 InstructionOperand inputs[4];
608 size_t input_count = 0;
609 AddressingMode mode = g.GenerateMemoryOperandInputs(
610 m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
611
612 DCHECK_NE(0u, input_count);
613 DCHECK_GE(arraysize(inputs), input_count);
614
615 InstructionOperand outputs[1];
616 outputs[0] = g.DefineAsRegister(node);
617
618 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
619 Emit(opcode, 1, outputs, input_count, inputs);
620 return;
621 }
622
623 // No lea pattern match, use add
624 VisitBinop(this, node, kIA32Add);
625 }
626
627
VisitInt32Sub(Node * node)628 void InstructionSelector::VisitInt32Sub(Node* node) {
629 IA32OperandGenerator g(this);
630 Int32BinopMatcher m(node);
631 if (m.left().Is(0)) {
632 Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
633 } else {
634 VisitBinop(this, node, kIA32Sub);
635 }
636 }
637
638
VisitInt32Mul(Node * node)639 void InstructionSelector::VisitInt32Mul(Node* node) {
640 Int32ScaleMatcher m(node, true);
641 if (m.matches()) {
642 Node* index = node->InputAt(0);
643 Node* base = m.power_of_two_plus_one() ? index : nullptr;
644 EmitLea(this, node, index, m.scale(), base, nullptr);
645 return;
646 }
647 IA32OperandGenerator g(this);
648 Node* left = node->InputAt(0);
649 Node* right = node->InputAt(1);
650 if (g.CanBeImmediate(right)) {
651 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
652 g.UseImmediate(right));
653 } else {
654 if (g.CanBeBetterLeftOperand(right)) {
655 std::swap(left, right);
656 }
657 Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
658 g.Use(right));
659 }
660 }
661
662
VisitInt32MulHigh(Node * node)663 void InstructionSelector::VisitInt32MulHigh(Node* node) {
664 VisitMulHigh(this, node, kIA32ImulHigh);
665 }
666
667
VisitUint32MulHigh(Node * node)668 void InstructionSelector::VisitUint32MulHigh(Node* node) {
669 VisitMulHigh(this, node, kIA32UmulHigh);
670 }
671
672
VisitInt32Div(Node * node)673 void InstructionSelector::VisitInt32Div(Node* node) {
674 VisitDiv(this, node, kIA32Idiv);
675 }
676
677
VisitUint32Div(Node * node)678 void InstructionSelector::VisitUint32Div(Node* node) {
679 VisitDiv(this, node, kIA32Udiv);
680 }
681
682
VisitInt32Mod(Node * node)683 void InstructionSelector::VisitInt32Mod(Node* node) {
684 VisitMod(this, node, kIA32Idiv);
685 }
686
687
VisitUint32Mod(Node * node)688 void InstructionSelector::VisitUint32Mod(Node* node) {
689 VisitMod(this, node, kIA32Udiv);
690 }
691
692
VisitChangeFloat32ToFloat64(Node * node)693 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
694 VisitRO(this, node, kSSEFloat32ToFloat64);
695 }
696
697
VisitChangeInt32ToFloat64(Node * node)698 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
699 VisitRO(this, node, kSSEInt32ToFloat64);
700 }
701
702
VisitChangeUint32ToFloat64(Node * node)703 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
704 VisitRO(this, node, kSSEUint32ToFloat64);
705 }
706
707
VisitChangeFloat64ToInt32(Node * node)708 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
709 VisitRO(this, node, kSSEFloat64ToInt32);
710 }
711
712
VisitChangeFloat64ToUint32(Node * node)713 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
714 VisitRO(this, node, kSSEFloat64ToUint32);
715 }
716
717
VisitTruncateFloat64ToFloat32(Node * node)718 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
719 VisitRO(this, node, kSSEFloat64ToFloat32);
720 }
721
722
VisitTruncateFloat64ToInt32(Node * node)723 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
724 switch (TruncationModeOf(node->op())) {
725 case TruncationMode::kJavaScript:
726 return VisitRR(this, node, kArchTruncateDoubleToI);
727 case TruncationMode::kRoundToZero:
728 return VisitRO(this, node, kSSEFloat64ToInt32);
729 }
730 UNREACHABLE();
731 }
732
733
VisitBitcastFloat32ToInt32(Node * node)734 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
735 IA32OperandGenerator g(this);
736 Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
737 }
738
739
VisitBitcastInt32ToFloat32(Node * node)740 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
741 IA32OperandGenerator g(this);
742 Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
743 }
744
745
VisitFloat32Add(Node * node)746 void InstructionSelector::VisitFloat32Add(Node* node) {
747 VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
748 }
749
750
VisitFloat64Add(Node * node)751 void InstructionSelector::VisitFloat64Add(Node* node) {
752 VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
753 }
754
755
VisitFloat32Sub(Node * node)756 void InstructionSelector::VisitFloat32Sub(Node* node) {
757 IA32OperandGenerator g(this);
758 Float32BinopMatcher m(node);
759 if (m.left().IsMinusZero()) {
760 VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
761 kSSEFloat32Neg);
762 return;
763 }
764 VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
765 }
766
767
void InstructionSelector::VisitFloat64Sub(Node* node) {
  IA32OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    // -0.0 - x is a negation.  Before emitting a neg, try to match the whole
    // pattern -0.0 - RoundDown(-0.0 - x): since -floor(-x) == ceil(x), it can
    // be emitted as a single round-up.  CanCover ensures the intermediate
    // nodes have no other users.
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
               g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
                   kSSEFloat64Neg);
    return;
  }
  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
790
791
VisitFloat32Mul(Node * node)792 void InstructionSelector::VisitFloat32Mul(Node* node) {
793 VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
794 }
795
796
VisitFloat64Mul(Node * node)797 void InstructionSelector::VisitFloat64Mul(Node* node) {
798 VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
799 }
800
801
VisitFloat32Div(Node * node)802 void InstructionSelector::VisitFloat32Div(Node* node) {
803 VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
804 }
805
806
VisitFloat64Div(Node * node)807 void InstructionSelector::VisitFloat64Div(Node* node) {
808 VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
809 }
810
811
VisitFloat64Mod(Node * node)812 void InstructionSelector::VisitFloat64Mod(Node* node) {
813 IA32OperandGenerator g(this);
814 InstructionOperand temps[] = {g.TempRegister(eax)};
815 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
816 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
817 temps);
818 }
819
820
VisitFloat32Max(Node * node)821 void InstructionSelector::VisitFloat32Max(Node* node) {
822 VisitRROFloat(this, node, kAVXFloat32Max, kSSEFloat32Max);
823 }
824
825
VisitFloat64Max(Node * node)826 void InstructionSelector::VisitFloat64Max(Node* node) {
827 VisitRROFloat(this, node, kAVXFloat64Max, kSSEFloat64Max);
828 }
829
830
VisitFloat32Min(Node * node)831 void InstructionSelector::VisitFloat32Min(Node* node) {
832 VisitRROFloat(this, node, kAVXFloat32Min, kSSEFloat32Min);
833 }
834
835
VisitFloat64Min(Node * node)836 void InstructionSelector::VisitFloat64Min(Node* node) {
837 VisitRROFloat(this, node, kAVXFloat64Min, kSSEFloat64Min);
838 }
839
840
VisitFloat32Abs(Node * node)841 void InstructionSelector::VisitFloat32Abs(Node* node) {
842 IA32OperandGenerator g(this);
843 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
844 }
845
846
VisitFloat64Abs(Node * node)847 void InstructionSelector::VisitFloat64Abs(Node* node) {
848 IA32OperandGenerator g(this);
849 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
850 }
851
852
VisitFloat32Sqrt(Node * node)853 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
854 VisitRO(this, node, kSSEFloat32Sqrt);
855 }
856
857
VisitFloat64Sqrt(Node * node)858 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
859 VisitRO(this, node, kSSEFloat64Sqrt);
860 }
861
862
VisitFloat32RoundDown(Node * node)863 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
864 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
865 }
866
867
VisitFloat64RoundDown(Node * node)868 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
869 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
870 }
871
872
VisitFloat32RoundUp(Node * node)873 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
874 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
875 }
876
877
VisitFloat64RoundUp(Node * node)878 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
879 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
880 }
881
882
VisitFloat32RoundTruncate(Node * node)883 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
884 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
885 }
886
887
VisitFloat64RoundTruncate(Node * node)888 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
889 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
890 }
891
892
VisitFloat64RoundTiesAway(Node * node)893 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
894 UNREACHABLE();
895 }
896
897
VisitFloat32RoundTiesEven(Node * node)898 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
899 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
900 }
901
902
VisitFloat64RoundTiesEven(Node * node)903 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
904 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
905 }
906
907
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)908 void InstructionSelector::EmitPrepareArguments(
909 ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
910 Node* node) {
911 IA32OperandGenerator g(this);
912
913 // Prepare for C function call.
914 if (descriptor->IsCFunctionCall()) {
915 InstructionOperand temps[] = {g.TempRegister()};
916 size_t const temp_count = arraysize(temps);
917 Emit(kArchPrepareCallCFunction |
918 MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
919 0, nullptr, 0, nullptr, temp_count, temps);
920
921 // Poke any stack arguments.
922 for (size_t n = 0; n < arguments->size(); ++n) {
923 PushParameter input = (*arguments)[n];
924 if (input.node()) {
925 int const slot = static_cast<int>(n);
926 InstructionOperand value = g.CanBeImmediate(node)
927 ? g.UseImmediate(input.node())
928 : g.UseRegister(input.node());
929 Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
930 }
931 }
932 } else {
933 // Push any stack arguments.
934 for (PushParameter input : base::Reversed(*arguments)) {
935 // Skip any alignment holes in pushed nodes.
936 if (input.node() == nullptr) continue;
937 InstructionOperand value =
938 g.CanBeImmediate(input.node())
939 ? g.UseImmediate(input.node())
940 : IsSupported(ATOM) ||
941 sequence()->IsFloat(GetVirtualRegister(input.node()))
942 ? g.UseRegister(input.node())
943 : g.Use(input.node());
944 if (input.type() == MachineType::Float32()) {
945 Emit(kIA32PushFloat32, g.NoOutput(), value);
946 } else if (input.type() == MachineType::Float64()) {
947 Emit(kIA32PushFloat64, g.NoOutput(), value);
948 } else {
949 Emit(kIA32Push, g.NoOutput(), value);
950 }
951 }
952 }
953 }
954
955
// On ia32 a tail-call target address can always be encoded as an immediate
// operand of the jump, so no register is needed for it.
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
957
958
959 namespace {
960
961 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,InstructionOperand left,InstructionOperand right,FlagsContinuation * cont)962 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
963 InstructionOperand left, InstructionOperand right,
964 FlagsContinuation* cont) {
965 IA32OperandGenerator g(selector);
966 if (cont->IsBranch()) {
967 selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
968 g.Label(cont->true_block()), g.Label(cont->false_block()));
969 } else {
970 DCHECK(cont->IsSet());
971 selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
972 left, right);
973 }
974 }
975
976
977 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,Node * left,Node * right,FlagsContinuation * cont,bool commutative)978 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
979 Node* left, Node* right, FlagsContinuation* cont,
980 bool commutative) {
981 IA32OperandGenerator g(selector);
982 if (commutative && g.CanBeBetterLeftOperand(right)) {
983 std::swap(left, right);
984 }
985 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
986 }
987
988
989 // Shared routine for multiple float32 compare operations (inputs commuted).
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)990 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
991 FlagsContinuation* cont) {
992 Node* const left = node->InputAt(0);
993 Node* const right = node->InputAt(1);
994 VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
995 }
996
997
998 // Shared routine for multiple float64 compare operations (inputs commuted).
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)999 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1000 FlagsContinuation* cont) {
1001 Node* const left = node->InputAt(0);
1002 Node* const right = node->InputAt(1);
1003 VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
1004 }
1005
1006
1007 // Shared routine for multiple word compare operations.
VisitWordCompare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont)1008 void VisitWordCompare(InstructionSelector* selector, Node* node,
1009 InstructionCode opcode, FlagsContinuation* cont) {
1010 IA32OperandGenerator g(selector);
1011 Node* const left = node->InputAt(0);
1012 Node* const right = node->InputAt(1);
1013
1014 // Match immediates on left or right side of comparison.
1015 if (g.CanBeImmediate(right)) {
1016 VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
1017 } else if (g.CanBeImmediate(left)) {
1018 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1019 VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
1020 } else {
1021 VisitCompare(selector, opcode, left, right, cont,
1022 node->op()->HasProperty(Operator::kCommutative));
1023 }
1024 }
1025
1026
// Word-compare entry point that first tries to recognize the stack-check
// pattern Compare(Load(js_stack_limit), LoadStackPointer) and emit the
// dedicated kIA32StackCheck instruction for it; otherwise falls back to an
// ordinary 32-bit compare.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    // Only the exact load of the isolate's stack limit (offset 0) qualifies.
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      // NOTE(review): the condition is commuted here, presumably because the
      // matched pattern's operand order is swapped relative to what
      // kIA32StackCheck compares — confirm against the backend's
      // implementation of kIA32StackCheck.
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kIA32StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  // Not a stack check: emit a regular 32-bit compare.
  VisitWordCompare(selector, node, kIA32Cmp, cont);
}
1051
1052
// Shared routine for word comparison with zero. Walks down the value being
// tested and, where this selector can cover the node, fuses a comparison or
// an <Operation>WithOverflow directly into the branch/set continuation
// instead of first materializing a boolean and comparing that against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine the branch with a comparison.
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Try to combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // The float cases use "unsigned"/unordered conditions because
      // VisitFloat32Compare/VisitFloat64Compare commute their inputs.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Add, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Sub, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kWord32And:
        // Testing (a & b) against zero maps onto a test instruction.
        return VisitWordCompare(selector, value, kIA32Test, cont);
      default:
        break;
    }
    break;
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  IA32OperandGenerator g(selector);
  VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
}
1141
1142 } // namespace
1143
1144
VisitBranch(Node * branch,BasicBlock * tbranch,BasicBlock * fbranch)1145 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1146 BasicBlock* fbranch) {
1147 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1148 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
1149 }
1150
1151
VisitSwitch(Node * node,const SwitchInfo & sw)1152 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1153 IA32OperandGenerator g(this);
1154 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1155
1156 // Emit either ArchTableSwitch or ArchLookupSwitch.
1157 size_t table_space_cost = 4 + sw.value_range;
1158 size_t table_time_cost = 3;
1159 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1160 size_t lookup_time_cost = sw.case_count;
1161 if (sw.case_count > 4 &&
1162 table_space_cost + 3 * table_time_cost <=
1163 lookup_space_cost + 3 * lookup_time_cost &&
1164 sw.min_value > std::numeric_limits<int32_t>::min()) {
1165 InstructionOperand index_operand = value_operand;
1166 if (sw.min_value) {
1167 index_operand = g.TempRegister();
1168 Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
1169 value_operand, g.TempImmediate(-sw.min_value));
1170 }
1171 // Generate a table lookup.
1172 return EmitTableSwitch(sw, index_operand);
1173 }
1174
1175 // Generate a sequence of conditional jumps.
1176 return EmitLookupSwitch(sw, value_operand);
1177 }
1178
1179
VisitWord32Equal(Node * const node)1180 void InstructionSelector::VisitWord32Equal(Node* const node) {
1181 FlagsContinuation cont(kEqual, node);
1182 Int32BinopMatcher m(node);
1183 if (m.right().Is(0)) {
1184 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1185 }
1186 VisitWordCompare(this, node, &cont);
1187 }
1188
1189
VisitInt32LessThan(Node * node)1190 void InstructionSelector::VisitInt32LessThan(Node* node) {
1191 FlagsContinuation cont(kSignedLessThan, node);
1192 VisitWordCompare(this, node, &cont);
1193 }
1194
1195
VisitInt32LessThanOrEqual(Node * node)1196 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1197 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1198 VisitWordCompare(this, node, &cont);
1199 }
1200
1201
VisitUint32LessThan(Node * node)1202 void InstructionSelector::VisitUint32LessThan(Node* node) {
1203 FlagsContinuation cont(kUnsignedLessThan, node);
1204 VisitWordCompare(this, node, &cont);
1205 }
1206
1207
VisitUint32LessThanOrEqual(Node * node)1208 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1209 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1210 VisitWordCompare(this, node, &cont);
1211 }
1212
1213
VisitInt32AddWithOverflow(Node * node)1214 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1215 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1216 FlagsContinuation cont(kOverflow, ovf);
1217 return VisitBinop(this, node, kIA32Add, &cont);
1218 }
1219 FlagsContinuation cont;
1220 VisitBinop(this, node, kIA32Add, &cont);
1221 }
1222
1223
VisitInt32SubWithOverflow(Node * node)1224 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1225 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1226 FlagsContinuation cont(kOverflow, ovf);
1227 return VisitBinop(this, node, kIA32Sub, &cont);
1228 }
1229 FlagsContinuation cont;
1230 VisitBinop(this, node, kIA32Sub, &cont);
1231 }
1232
1233
VisitFloat32Equal(Node * node)1234 void InstructionSelector::VisitFloat32Equal(Node* node) {
1235 FlagsContinuation cont(kUnorderedEqual, node);
1236 VisitFloat32Compare(this, node, &cont);
1237 }
1238
1239
VisitFloat32LessThan(Node * node)1240 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1241 FlagsContinuation cont(kUnsignedGreaterThan, node);
1242 VisitFloat32Compare(this, node, &cont);
1243 }
1244
1245
VisitFloat32LessThanOrEqual(Node * node)1246 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1247 FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
1248 VisitFloat32Compare(this, node, &cont);
1249 }
1250
1251
VisitFloat64Equal(Node * node)1252 void InstructionSelector::VisitFloat64Equal(Node* node) {
1253 FlagsContinuation cont(kUnorderedEqual, node);
1254 VisitFloat64Compare(this, node, &cont);
1255 }
1256
1257
VisitFloat64LessThan(Node * node)1258 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1259 FlagsContinuation cont(kUnsignedGreaterThan, node);
1260 VisitFloat64Compare(this, node, &cont);
1261 }
1262
1263
VisitFloat64LessThanOrEqual(Node * node)1264 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1265 FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
1266 VisitFloat64Compare(this, node, &cont);
1267 }
1268
1269
VisitFloat64ExtractLowWord32(Node * node)1270 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1271 IA32OperandGenerator g(this);
1272 Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
1273 g.Use(node->InputAt(0)));
1274 }
1275
1276
VisitFloat64ExtractHighWord32(Node * node)1277 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1278 IA32OperandGenerator g(this);
1279 Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
1280 g.Use(node->InputAt(0)));
1281 }
1282
1283
VisitFloat64InsertLowWord32(Node * node)1284 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1285 IA32OperandGenerator g(this);
1286 Node* left = node->InputAt(0);
1287 Node* right = node->InputAt(1);
1288 Float64Matcher mleft(left);
1289 if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
1290 Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
1291 return;
1292 }
1293 Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1294 g.UseRegister(left), g.Use(right));
1295 }
1296
1297
VisitFloat64InsertHighWord32(Node * node)1298 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1299 IA32OperandGenerator g(this);
1300 Node* left = node->InputAt(0);
1301 Node* right = node->InputAt(1);
1302 Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1303 g.UseRegister(left), g.Use(right));
1304 }
1305
1306
1307 // static
1308 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()1309 InstructionSelector::SupportedMachineOperatorFlags() {
1310 MachineOperatorBuilder::Flags flags =
1311 MachineOperatorBuilder::kFloat32Max |
1312 MachineOperatorBuilder::kFloat32Min |
1313 MachineOperatorBuilder::kFloat64Max |
1314 MachineOperatorBuilder::kFloat64Min |
1315 MachineOperatorBuilder::kWord32ShiftIsSafe |
1316 MachineOperatorBuilder::kWord32Ctz;
1317 if (CpuFeatures::IsSupported(POPCNT)) {
1318 flags |= MachineOperatorBuilder::kWord32Popcnt;
1319 }
1320 if (CpuFeatures::IsSupported(SSE4_1)) {
1321 flags |= MachineOperatorBuilder::kFloat32RoundDown |
1322 MachineOperatorBuilder::kFloat64RoundDown |
1323 MachineOperatorBuilder::kFloat32RoundUp |
1324 MachineOperatorBuilder::kFloat64RoundUp |
1325 MachineOperatorBuilder::kFloat32RoundTruncate |
1326 MachineOperatorBuilder::kFloat64RoundTruncate |
1327 MachineOperatorBuilder::kFloat32RoundTiesEven |
1328 MachineOperatorBuilder::kFloat64RoundTiesEven;
1329 }
1330 return flags;
1331 }
1332
1333 } // namespace compiler
1334 } // namespace internal
1335 } // namespace v8
1336