// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,   // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};
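
// Illustrative note (not part of the original source): under kArithmeticImm,
// ADD/SUB accept a 12-bit unsigned immediate, optionally shifted left by 12.
// Values such as 4095 (0xfff) or 8192 (2 << 12) are directly encodable, while
// 4097 is not and has to be materialized into a register first.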


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator final : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the provided node if it has the required value, or create a
  // TempImmediate otherwise.
  InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
    if (GetIntegerConstantValue(node) == value) {
      return UseImmediate(node);
    }
    return TempImmediate(value);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node);
    }
    DCHECK(node->opcode() == IrOpcode::kInt64Constant);
    return OpParameter<int64_t>(node);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node);
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        return Assembler::IsImmAddSub(value);
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      case kLoadStoreImm64:
        return IsLoadStoreImmediate(value, LSDoubleWord);
      case kNoImmediate:
        return false;
      case kShift32Imm:  // Fall through.
      case kShift64Imm:
        // Shift operations only observe the bottom 5 or 6 bits of the value.
        // All possible shifts can be encoded by discarding bits which have no
        // effect.
        return true;
    }
    return false;
  }
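
  // Illustrative note (not part of the original source): logical immediates
  // are replicated, rotated runs of ones rather than arbitrary constants.
  // For kLogical32Imm, 0x00ff00ff and 0xff00ff00 are encodable, whereas a
  // value like 0x12345678 is not and needs a register operand instead.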

  bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
    // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
    DCHECK_NE(MachineRepresentation::kSimd128, rep);
    return IsIntegerConstant(node) &&
           (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
  }

 private:
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }
};


namespace {

void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
              ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}


bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                      Node* input_node, InstructionCode* opcode, bool try_ror) {
  Arm64OperandGenerator g(selector);

  if (!selector->CanCover(node, input_node)) return false;
  if (input_node->InputCount() != 2) return false;
  if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;

  switch (input_node->opcode()) {
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord64Shl:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      return true;
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord64Shr:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
      return true;
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord64Sar:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      return true;
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord64Ror:
      if (try_ror) {
        *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
        return true;
      }
      return false;
    default:
      return false;
  }
}


bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
                       Node* node, Node* left_node, Node* right_node,
                       InstructionOperand* left_op,
                       InstructionOperand* right_op, InstructionCode* opcode) {
  if (!selector->CanCover(node, right_node)) return false;

  NodeMatcher nm(right_node);

  if (nm.IsWord32And()) {
    Int32BinopMatcher mright(right_node);
    if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
      int32_t mask = mright.right().Value();
      *left_op = g->UseRegister(left_node);
      *right_op = g->UseRegister(mright.left().node());
      *opcode |= AddressingModeField::encode(
          (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
      return true;
    }
  } else if (nm.IsWord32Sar()) {
    Int32BinopMatcher mright(right_node);
    if (selector->CanCover(mright.node(), mright.left().node()) &&
        mright.left().IsWord32Shl()) {
      Int32BinopMatcher mleft_of_right(mright.left().node());
      if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
          (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
        int32_t shift = mright.right().Value();
        *left_op = g->UseRegister(left_node);
        *right_op = g->UseRegister(mleft_of_right.left().node());
        *opcode |= AddressingModeField::encode(
            (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
        return true;
      }
    }
  }
  return false;
}
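
// Illustrative note (not part of the original source): these matches fold a
// zero- or sign-extension into the operand of an add/sub. For example,
// Int32Add(a, Word32And(b, 0xff)) selects as
//   add w0, w1, w2, uxtb
// and Int32Add(a, Word32Sar(Word32Shl(b, 24), 24)) as
//   add w0, w1, w2, sxtb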

bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
                            InstructionSelector* selector,
                            MachineRepresentation rep, Node* node, Node* index,
                            InstructionOperand* index_op,
                            InstructionOperand* shift_immediate_op) {
  if (!selector->CanCover(node, index)) return false;
  if (index->InputCount() != 2) return false;
  Node* left = index->InputAt(0);
  Node* right = index->InputAt(1);
  switch (index->opcode()) {
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord64Shl:
      if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
        return false;
      }
      *index_op = g->UseRegister(left);
      *shift_immediate_op = g->UseImmediate(right);
      return true;
    default:
      return false;
  }
}
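
// Illustrative note (not part of the original source): the match above fires
// when the index is shifted left by exactly log2(access size), so the scaled
// register addressing mode can be used. A 32-bit load from
// base + (index << 2), for instance, becomes
//   ldr w0, [x1, x2, lsl #2]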

// Bitfields describing binary operator properties:
// CanCommuteField is true if we can switch the two operands, potentially
// requiring commuting the flags continuation condition.
typedef BitField8<bool, 1, 1> CanCommuteField;
// MustCommuteCondField is true when we need to commute the flags continuation
// condition in order to switch the operands.
typedef BitField8<bool, 2, 1> MustCommuteCondField;
// IsComparisonField is true when the operation is a comparison and has no
// result other than the condition.
typedef BitField8<bool, 3, 1> IsComparisonField;
// IsAddSubField is true when an instruction is encoded as ADD or SUB.
typedef BitField8<bool, 4, 1> IsAddSubField;
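
// Illustrative note (not part of the original source): kArm64Cmp32 sets all
// four fields above, since CMP commutes (with a commuted condition), produces
// no result other than the flags, and is encoded as a SUB with a zero
// destination register.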

// Get properties of a binary operator.
uint8_t GetBinopProperties(InstructionCode opcode) {
  uint8_t result = 0;
  switch (opcode) {
    case kArm64Cmp32:
    case kArm64Cmp:
      // We can commute CMP by switching the inputs and commuting
      // the flags continuation.
      result = CanCommuteField::update(result, true);
      result = MustCommuteCondField::update(result, true);
      result = IsComparisonField::update(result, true);
      // The CMP and CMN instructions are encoded as SUB or ADD
      // with zero output register, and therefore support the same
      // operand modes.
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Cmn32:
    case kArm64Cmn:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Add32:
    case kArm64Add:
      result = CanCommuteField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Sub32:
    case kArm64Sub:
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Tst32:
    case kArm64Tst:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      break;
    case kArm64And32:
    case kArm64And:
    case kArm64Or32:
    case kArm64Or:
    case kArm64Eor32:
    case kArm64Eor:
      result = CanCommuteField::update(result, true);
      break;
    default:
      UNREACHABLE();
      return 0;
  }
  DCHECK_IMPLIES(MustCommuteCondField::decode(result),
                 CanCommuteField::decode(result));
  return result;
}

// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  Node* left_node = node->InputAt(0);
  Node* right_node = node->InputAt(1);

  uint8_t properties = GetBinopProperties(opcode);
  bool can_commute = CanCommuteField::decode(properties);
  bool must_commute_cond = MustCommuteCondField::decode(properties);
  bool is_add_sub = IsAddSubField::decode(properties);

  if (g.CanBeImmediate(right_node, operand_mode)) {
    inputs[input_count++] = g.UseRegister(left_node);
    inputs[input_count++] = g.UseImmediate(right_node);
  } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
    if (must_commute_cond) cont->Commute();
    inputs[input_count++] = g.UseRegister(right_node);
    inputs[input_count++] = g.UseImmediate(left_node);
  } else if (is_add_sub &&
             TryMatchAnyExtend(&g, selector, node, left_node, right_node,
                               &inputs[0], &inputs[1], &opcode)) {
    input_count += 2;
  } else if (is_add_sub && can_commute &&
             TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                               &inputs[0], &inputs[1], &opcode)) {
    if (must_commute_cond) cont->Commute();
    input_count += 2;
  } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                              !is_add_sub)) {
    Matcher m_shift(right_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                             !is_add_sub)) {
    if (must_commute_cond) cont->Commute();
    Matcher m_shift(left_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else {
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(right_node);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (!IsComparisonField::decode(properties)) {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK((output_count != 0) || IsComparisonField::decode(properties));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}


template <typename Matcher>
void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                 ArchOpcode negate_opcode) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (m.right().HasValue() && (m.right().Value() < 0) &&
      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    selector->Emit(negate_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()),
                   g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
  }
}


// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
// return the value of k, otherwise return zero. This is used to reduce the
// multiplication to addition with left shift: x + (x << k).
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
  DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
  if (m->right().HasValue() && m->right().Value() >= 3) {
    uint64_t value_minus_one = m->right().Value() - 1;
    if (base::bits::IsPowerOfTwo64(value_minus_one)) {
      return WhichPowerOf2_64(value_minus_one);
    }
  }
  return 0;
}
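
// Illustrative note (not part of the original source): for x * 9 we have
// 9 = 2^3 + 1, so this returns k = 3 and the multiplication reduces to
//   add w0, w1, w1, lsl #3   // x + (x << 3)
// whereas x * 7 returns 0 and the multiply instruction is kept.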

}  // namespace


void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MachineRepresentation rep = load_rep.representation();
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionCode opcode = kArchNop;
  ImmediateMode immediate_mode = kNoImmediate;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  outputs[0] = g.DefineAsRegister(node);
  inputs[0] = g.UseRegister(base);

  if (g.CanBeImmediate(index, immediate_mode)) {
    input_count = 2;
    inputs[1] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_MRI);
  } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
                                    &inputs[2])) {
    input_count = 3;
    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
  } else {
    input_count = 2;
    inputs[1] = g.UseRegister(index);
    opcode |= AddressingModeField::encode(kMode_MRR);
  }

  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(arm64): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
    // must check kArithmeticImm as well as kLoadStoreImm64.
    if (g.CanBeImmediate(index, kArithmeticImm) &&
        g.CanBeImmediate(index, kLoadStoreImm64)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    InstructionOperand inputs[4];
    size_t input_count = 0;
    InstructionCode opcode = kArchNop;
    ImmediateMode immediate_mode = kNoImmediate;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArm64StrS;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArm64StrD;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArm64Strb;
        immediate_mode = kLoadStoreImm8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArm64Strh;
        immediate_mode = kLoadStoreImm16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kArm64StrW;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kArm64Str;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    inputs[0] = g.UseRegisterOrImmediateZero(value);
    inputs[1] = g.UseRegister(base);

    if (g.CanBeImmediate(index, immediate_mode)) {
      input_count = 3;
      inputs[2] = g.UseImmediate(index);
      opcode |= AddressingModeField::encode(kMode_MRI);
    } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
                                      &inputs[3])) {
      input_count = 4;
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    } else {
      input_count = 3;
      inputs[2] = g.UseRegister(index);
      opcode |= AddressingModeField::encode(kMode_MRR);
    }

    Emit(opcode, 0, nullptr, input_count, inputs);
  }
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // If the length is a constant power of two, allow the code generator to
  // pick a more efficient bounds check sequence by passing the length as an
  // immediate.
  if (length->opcode() == IrOpcode::kInt32Constant) {
    Int32Matcher m(length);
    if (m.IsPowerOf2()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(offset), g.UseImmediate(length));
      return;
    }
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}
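
// Illustrative note (not part of the original source): for a power-of-two
// length 2^k, "offset < length" holds exactly when all offset bits at
// position k and above are clear, so the bounds check can become a single
// test against the inverted mask, e.g. for length 0x1000:
//   tst w0, #0xfffff000
//   b.ne out_of_bounds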


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // If the length is a constant power of two, allow the code generator to
  // pick a more efficient bounds check sequence by passing the length as an
  // immediate.
  if (length->opcode() == IrOpcode::kInt32Constant) {
    Int32Matcher m(length);
    if (m.IsPowerOf2()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
           g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
      return;
    }
  }
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.UseOperand(length, kArithmeticImm),
       g.UseRegisterOrImmediateZero(value));
}


template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}
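
// Illustrative note (not part of the original source): since Xor with -1 is a
// bitwise NOT, Word32And(x, Word32Xor(y, -1)) selects as
//   bic w0, w1, w2   // x & ~y
// and the analogous Or/Eor patterns select orn/eon.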


void InstructionSelector::VisitWord32And(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ubfx cannot extract bits past the register size. However, since
        // shifting the original value would have introduced some zeros, we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}
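
// Illustrative note (not part of the original source): the pattern above turns
// Word32And(Word32Shr(x, 8), 0xff) into a single bitfield extract,
//   ubfx w0, w1, #8, #8
// with lsb = 8 and width = popcount(0xff) = 8.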


void InstructionSelector::VisitWord64And(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint64_t mask_width = base::bits::CountPopulation64(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);

        // Ubfx cannot extract bits past the register size. However, since
        // shifting the original value would have introduced some zeros, we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Arm64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kArm64Lsl32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        } else {
          // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
          // contiguous, and the shift immediate is non-zero.
          Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
          return;
        }
      }
    }
  }
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
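
// Illustrative note (not part of the original source): Shl(And(x, 0xff), 8)
// matches the Ubfiz path above (shift 8 + width 8 < 32) and selects as
//   ubfiz w0, w1, #8, #8
// which places bits [7:0] of x at bits [15:8] of the result.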


void InstructionSelector::VisitWord64Shl(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kArm64Lsl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}


namespace {

bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
    // OP is >>> or >> and (K & 0x1f) != 0.
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && m.right().HasValue() &&
        (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
      DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
      ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;

      int right_val = m.right().Value() & 0x1f;
      DCHECK_NE(right_val, 0);

      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()), g.TempImmediate(0),
                     g.TempImmediate(32 - right_val));
      return true;
    }
  }
  return false;
}
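
// Illustrative note (not part of the original source): this matches the
// classic shift-based sign/zero extension idiom. For example,
// Word32Sar(Word32Shl(x, 24), 24) becomes a sign-extending extract of the
// low byte:
//   sbfx w0, w1, #0, #8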

}  // namespace


void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  } else if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Uint32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
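
// Illustrative note (not part of the original source): the Uint32MulHigh
// combine above selects Shr(Uint32MulHigh(a, b), 3) as
//   umull x0, w1, w2
//   lsr x0, x0, #35   // 32 + 3
// taking both the high half and the extra shift out of the full 64-bit
// product in one shift.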


void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  Int32BinopMatcher m(node);
  if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Int32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  if (m.left().IsInt32Add() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    Node* add_node = m.left().node();
    Int32BinopMatcher madd_node(add_node);
    if (madd_node.left().IsInt32MulHigh() &&
        CanCover(add_node, madd_node.left().node())) {
      // Combine the shift that would be generated by Int32MulHigh with the add
      // on the left of this Sar operation. We do it here, as the result of the
      // add potentially has 33 bits, so we have to ensure the result is
      // truncated by being the input to this 32-bit Sar operation.
      Arm64OperandGenerator g(this);
      Node* mul_node = madd_node.left().node();

      InstructionOperand const smull_operand = g.TempRegister();
      Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
           g.UseRegister(mul_node->InputAt(1)));

      InstructionOperand const add_operand = g.TempRegister();
      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
           add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
           g.TempImmediate(32));

      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
           g.UseImmediate(node->InputAt(1)));
      return;
    }
  }

  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord32ReverseBits(Node* node) {
  VisitRR(this, kArm64Rbit32, node);
}


void InstructionSelector::VisitWord64ReverseBits(Node* node) {
  VisitRR(this, kArm64Rbit, node);
}


void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check that the multiply can't later be reduced to addition with a shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}
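
// Illustrative note (not part of the original source): the Mneg pattern above
// selects Mul(Sub(0, x), y) as
//   mneg w0, w1, w2   // -(x * y)
// avoiding a separate negation.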


void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}
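
// Illustrative note (not part of the original source): smull computes the
// full 64-bit signed product of two 32-bit registers, so the high half is
// obtained with a 64-bit arithmetic shift:
//   smull x0, w1, w2
//   asr x0, x0, #32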


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}
1400 
1401 
VisitChangeFloat32ToFloat64(Node * node)1402 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
1403   VisitRR(this, kArm64Float32ToFloat64, node);
1404 }
1405 
1406 
VisitRoundInt32ToFloat32(Node * node)1407 void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
1408   VisitRR(this, kArm64Int32ToFloat32, node);
1409 }
1410 
1411 
VisitRoundUint32ToFloat32(Node * node)1412 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
1413   VisitRR(this, kArm64Uint32ToFloat32, node);
1414 }
1415 
1416 
VisitChangeInt32ToFloat64(Node * node)1417 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
1418   VisitRR(this, kArm64Int32ToFloat64, node);
1419 }
1420 
1421 
VisitChangeUint32ToFloat64(Node * node)1422 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
1423   VisitRR(this, kArm64Uint32ToFloat64, node);
1424 }
1425 
1426 
VisitTruncateFloat32ToInt32(Node * node)1427 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
1428   VisitRR(this, kArm64Float32ToInt32, node);
1429 }
1430 
1431 
VisitChangeFloat64ToInt32(Node * node)1432 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
1433   VisitRR(this, kArm64Float64ToInt32, node);
1434 }
1435 
1436 
VisitTruncateFloat32ToUint32(Node * node)1437 void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
1438   VisitRR(this, kArm64Float32ToUint32, node);
1439 }
1440 
1441 
VisitChangeFloat64ToUint32(Node * node)1442 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
1443   VisitRR(this, kArm64Float64ToUint32, node);
1444 }
1445 
VisitTruncateFloat64ToUint32(Node * node)1446 void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
1447   VisitRR(this, kArm64Float64ToUint32, node);
1448 }
1449 
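// The TryTruncate* operations produce an optional second projection that
// reports whether the conversion was exact; an output is allocated for it
// only when that projection is actually used.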
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  VisitRR(this, kArm64Sxtw, node);
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32 bits of the corresponding X register), so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      // As for the operations above, a 32-bit load will implicitly clear the
      // top 32 bits of the destination register.
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
        case MachineRepresentation::kWord32:
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
          return;
        default:
          break;
      }
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64ToFloat32, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ToInt32, node);
}

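// Truncation to 32 bits only needs the low W register. When the input is a
// 64-bit right shift by exactly 32 (arithmetic) or by 32-63 (logical), the
// wanted bits can be produced directly with a single Lsr instead.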
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value) && value->InputCount() >= 2) {
    Int64BinopMatcher m(value);
    if ((m.IsWord64Sar() && m.right().HasValue() &&
         (m.right().Value() == 32)) ||
        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseImmediate(m.right().node()));
      return;
    }
  }

  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kArm64Int64ToFloat32, node);
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Int64ToFloat64, node);
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat32, node);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat64, node);
}

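// Bitcasts only move bits between the integer and floating-point register
// files; they lower to plain register moves. The float32 case reuses the
// Float64ExtractLowWord32 code path, since the 32-bit pattern lives in the
// low word of the FP register.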
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kArm64U64MoveFloat64, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kArm64Float32Add, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kArm64Float32Sub, node);
}

void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitRRR(this, kArm64Float32Sub, node);
}

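// Select Fneg(x) for Sub(-0.0, x). In addition, -0.0 - RoundDown(-0.0 - x)
// is matched as RoundUp(x): negating the input before and after the
// round-down flips the rounding direction.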
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
               g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitRRR(this, kArm64Float64Sub, node);
}

void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
  VisitRRR(this, kArm64Float64Sub, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArm64Float32Mul, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArm64Float32Div, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArm64Float64Div, node);
}

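// There is no ARM64 instruction for floating-point remainder, so this is
// lowered to a call out to a C modulus routine: arguments and result use the
// fixed FP registers d0/d1, and MarkAsCall makes the register allocator
// treat caller-saved registers as clobbered across the instruction.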
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kArm64Float32Max, node);
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kArm64Float64Max, node);
}


void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kArm64Float32Min, node);
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kArm64Float64Min, node);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kArm64Float32Abs, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kArm64Float64Abs, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kArm64Float32Sqrt, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kArm64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kArm64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kArm64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kArm64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesAway, node);
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kArm64Float32Neg, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kArm64Float64Neg, node);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
      ->MarkAsCall();
}

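// Arguments are pushed by first claiming the required stack slots (on csp or
// jssp, depending on whether the callee uses the native stack) and then
// poking each argument into its slot.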
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
  bool to_native_stack = descriptor->UseNativeStack();

  bool always_claim = to_native_stack != from_native_stack;

  int claim_count = static_cast<int>(arguments->size());
  int slot = claim_count - 1;
  // Bump the stack pointer(s).
  if (claim_count > 0 || always_claim) {
    // TODO(titzer): claim and poke probably take small immediates.
    // TODO(titzer): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
    // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
    Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
  }

  // Poke the arguments into the stack.
  ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
  while (slot >= 0) {
    Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
         g.TempImmediate(slot));
    slot--;
    // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
    //              same type.
    // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
    //      g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
    // slot -= 2;
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  ArchOpcode opcode = kArm64Cmp32;

  // Select negated compare for comparisons with negated right input.
  if (m.right().IsInt32Sub()) {
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it.
      // In order to get shifted operations to work, we must remove the rhs
      // input to the subtraction, as TryMatchAnyShift requires this node to
      // cover the input shift. We do this by setting it to the lhs input,
      // as we know it's zero, and the result of the subtraction isn't used by
      // any other node.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}


void VisitWordTest(InstructionSelector* selector, Node* node,
                   InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void VisitWord32Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


void VisitWord64Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}

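// tbz/tbnz test a single bit and branch on it, so this combination applies
// only when the And mask is a power of two (exactly one bit set).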
template <typename Matcher, ArchOpcode kOpcode>
bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (cont->IsBranch() && m.right().HasValue() &&
      (base::bits::CountPopulation(m.right().Value()) == 1)) {
    // If the mask has only one bit set, we can use tbz/tbnz.
    DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
    selector->Emit(
        cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
        g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
        g.Label(cont->true_block()), g.Label(cont->false_block()));
    return true;
  }
  return false;
}

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}

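// Shared routine for branches and deoptimization checks on a value compared
// against zero. Walks through covered inputs, fusing comparisons, bit tests
// and overflow projections into the flags continuation where possible.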
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          Node* const left = m.left().node();
          if (selector->CanCover(value, left) &&
              left->opcode() == IrOpcode::kWord64And) {
            // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
            // into a tbz/tbnz instruction.
            if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
                    selector, left, cont)) {
              return;
            }
            return VisitWordCompare(selector, left, kArm64Tst, cont, true,
                                    kLogical64Imm);
          }
          // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
          if (cont->IsBranch()) {
            selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
                           g.UseRegister(left), g.Label(cont->true_block()),
                           g.Label(cont->false_block()));
            return;
          }
        }
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kArm64Add32, kArithmeticImm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kArm64Sub32, kArithmeticImm, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
                                                     kArithmeticImm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
                                                     kArithmeticImm, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
                selector, value, cont)) {
          return;
        }
        return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
                                kLogical32Imm);
      case IrOpcode::kWord64And:
        if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
                selector, value, cont)) {
          return;
        }
        return VisitWordCompare(selector, value, kArm64Tst, cont, true,
                                kLogical64Imm);
      default:
        break;
    }
    break;
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
                   g.UseRegister(value), g.Label(cont->true_block()),
                   g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsDeoptimize());
    selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
                             g.UseRegister(value), g.UseRegister(value),
                             cont->frame_state());
  }
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

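// Choose between a jump table and a sequence of conditional jumps by
// comparing code-space cost against dispatch-time cost, with time weighted
// three times as heavily as space.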
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kArm64Sub32, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        case IrOpcode::kWord32Equal: {
          // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
          Int32BinopMatcher mequal(value);
          node->ReplaceInput(0, mequal.left().node());
          node->ReplaceInput(1, mequal.right().node());
          cont.Negate();
          return VisitWord32Compare(this, node, &cont);
        }
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

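// When an InsertLowWord32 consumes a covered InsertHighWord32 (or vice
// versa), both 32-bit halves are merged into one integer register with a
// single Bfi bitfield insert, followed by one move to the FP register.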
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kArm64Float64SilenceNaN, node);
}

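// Atomic loads and stores use a base+index (MRR) addressing mode; the
// machine representation selects the width-specific opcode. Stores take all
// operands in unique registers and produce no outputs.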
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}

void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, nullptr, input_count, inputs);
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32Max |
         MachineOperatorBuilder::kFloat32Min |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64Max |
         MachineOperatorBuilder::kFloat64Min |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ReverseBits |
         MachineOperatorBuilder::kWord64ReverseBits |
         MachineOperatorBuilder::kFloat32Neg |
         MachineOperatorBuilder::kFloat64Neg;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8