// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)


// Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node);
    }
    DCHECK(node->opcode() == IrOpcode::kInt64Constant);
    return OpParameter<int64_t>(node);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node);
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node);
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

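  // The ranges below mirror the MIPS64 immediate encodings: 5- and 6-bit
  // unsigned shift-amount fields for the 32- and 64-bit shifts, 16-bit
  // immediate fields for the arithmetic and logical ops, and 16-bit signed
  // offsets for memory operands. Ldc1/Sdc1 check `value + kIntSize` so that
  // the offset of the second word of the double (offset + 4) is encodable
  // as well.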
  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
      case kMips64Ldc1:
      case kMips64Sdc1:
        return is_int16(value + kIntSize);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};


static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

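// Worked example (little-endian): for
//   Word64Sar(Load[kWord64](base, offset), 32)
// the matcher below narrows the load to a 32-bit sign-extending
//   Lw base, offset + 4
// which reads just the upper half of the doubleword, making the explicit
// 64-bit arithmetic shift unnecessary. This is the SMI-untagging pattern
// mentioned in Initialize().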
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
  ExtendingLoadMatcher m(node, selector);
  Mips64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  Mips64OperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

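// VisitBinop assembles the operand vectors shared by all binary operations:
// it tries to place an immediate on the right (or on the left when a
// commuted opcode is available), branch continuations append their
// true/false labels as extra inputs, and deoptimizing continuations define
// the output same-as-first so the binop result cannot clobber a deopt input.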
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure
    // that the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count,
                             inputs, cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

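// EmitLoad covers the two addressing cases: an index that fits the 16-bit
// immediate field becomes a single MRI-mode load, otherwise base + index is
// materialized into a temporary register with Dadd and the load uses that
// register with a zero offset.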
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  Mips64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}

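// Worked example for the bit-field matches below: in
//   Word32And(Word32Shr(x, 4), 0xff)
// the mask 0xff is contiguous and anchored at bit 0 (mask_width == 8,
// mask_msb == 24), so the whole pair is selected as a single extract,
//   Ext rd, x, 4, 8
// instead of a shift followed by an And with a materialized constant.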
void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation64(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation64(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And, true, kMips64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}

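// The (x << K) >> K patterns matched below are how the compiler expresses
// narrow sign extension: shift pairs of 16 select Seh (sign-extend
// halfword), pairs of 24 select Seb (sign-extend byte), and pairs of 32
// reduce to `Shl rd, rs, 0`, which on MIPS64 sign-extends the low 32 bits
// into the full register.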
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint64_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation64(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}


void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Popcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}

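// The Lsa/Dlsa operations below compute left + (left_of_right << imm) in a
// single operation; as the note in VisitInt64Mul suggests, these are
// macro-assembler macros, so shift amounts outside the instruction's range
// are presumably legalized there rather than here.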
void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }
  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Dlsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    if (mright.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Dlsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}

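// Strength reduction for multiplication by a positive constant c:
//   c == 2^k      =>  x << k
//   c == 2^k + 1  =>  Lsa(x, x, k), i.e. x + (x << k)
//   c == 2^k - 1  =>  (x << k) - x
// For example, x * 9 becomes Lsa(x, x, 3) and x * 7 becomes (x << 3) - x.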
void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = static_cast<int32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // The Dlsa macro handles shift amounts that are out of range.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

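// When both inputs are SMI-untagging shifts (Word64Sar x, 32), the visitors
// below fold the shifts into the 64-bit instruction: for example, for signed
// division (a << 32) / (b << 32) == a / b, so Ddiv can consume the original
// registers directly.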
void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

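// The TryTruncate* visitors emit a single instruction with up to two
// outputs: output 0 is the truncated value and, if the optional projection
// is present, output 1 is the success flag indicating the input was in
// range for the conversion.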
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Mips64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}

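// MIPS64 keeps 32-bit values sign-extended in 64-bit registers, so
// ChangeInt32ToInt64 either folds the extension into a sign-extending load
// or emits `Shl rd, rs, 0` (sll with shift amount zero), the canonical
// sign-extension idiom; ChangeUint32ToUint64 zero-extends with Dext(0, 32)
// unless the producing operation already cleared the upper bits.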
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, node);
  } else {
    Mips64OperandGenerator g(this);
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    // 32-bit operations will write their result in a 64-bit register,
    // clearing the top 32 bits of the destination register.
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
          case MachineRepresentation::kWord32:
            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
            return;
          default:
            break;
        }
      }
      break;
    }
    default:
      break;
  }
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        Int64BinopMatcher m(value);
        if (m.right().IsInRange(32, 63)) {
          // After smi untagging no explicit truncation is needed; combine
          // the shift and the truncation into a single Dsar.
          Emit(kMips64Dsar, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}

VisitTruncateFloat64ToFloat32(Node * node)1340 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
1341   Mips64OperandGenerator g(this);
1342   Node* value = node->InputAt(0);
1343   // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
1344   // instruction.
1345   if (CanCover(node, value) &&
1346       value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
1347     Emit(kMips64CvtSW, g.DefineAsRegister(node),
1348          g.UseRegister(value->InputAt(0)));
1349     return;
1350   }
1351   VisitRR(this, kMips64CvtSD, node);
1352 }
1353 
VisitTruncateFloat64ToWord32(Node * node)1354 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
1355   VisitRR(this, kArchTruncateDoubleToI, node);
1356 }
1357 
VisitRoundFloat64ToInt32(Node * node)1358 void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
1359   VisitRR(this, kMips64TruncWD, node);
1360 }
1361 
VisitRoundInt64ToFloat32(Node * node)1362 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
1363   VisitRR(this, kMips64CvtSL, node);
1364 }
1365 
1366 
VisitRoundInt64ToFloat64(Node * node)1367 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
1368   VisitRR(this, kMips64CvtDL, node);
1369 }
1370 
1371 
VisitRoundUint64ToFloat32(Node * node)1372 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
1373   VisitRR(this, kMips64CvtSUl, node);
1374 }
1375 
1376 
VisitRoundUint64ToFloat64(Node * node)1377 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
1378   VisitRR(this, kMips64CvtDUl, node);
1379 }
1380 
1381 
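// Bitcasts between same-width integer and floating-point values are plain
// bit moves between the register files. Note that the 32-bit variants reuse
// the Float64 low-word extract/insert opcodes for the GP<->FP transfer.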
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}


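// Float32/Float64 add and sub try to fold a covered multiply into a
// multiply-accumulate. On r2, madd.s/madd.d take a separate accumulator
// operand (fd = fr + fs * ft), so the result may be defined in any register;
// on r6, maddf.s/maddf.d accumulate into their destination
// (fd = fd + fs * ft), which is why those cases use DefineSameAsFirst.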
void InstructionSelector::VisitFloat32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    // For Add.S(Mul.S(x, y), z):
    Float32BinopMatcher mleft(m.left().node());
    if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
      Emit(kMips64MaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(z, x, y).
      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    // For Add.S(x, Mul.S(y, z)):
    Float32BinopMatcher mright(m.right().node());
    if (kArchVariant == kMips64r2) {  // Select Madd.S(x, y, z).
      Emit(kMips64MaddS, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(x, y, z).
      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64AddS, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    // For Add.D(Mul.D(x, y), z):
    Float64BinopMatcher mleft(m.left().node());
    if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
      Emit(kMips64MaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(z, x, y).
      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    // For Add.D(x, Mul.D(y, z)):
    Float64BinopMatcher mright(m.right().node());
    if (kArchVariant == kMips64r2) {  // Select Madd.D(x, y, z).
      Emit(kMips64MaddD, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(x, y, z).
      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64AddD, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  Mips64OperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    if (kArchVariant == kMips64r2) {
      // For Sub.S(Mul.S(x, y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMips64MsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    if (kArchVariant == kMips64r6) {
      // For Sub.S(x, Mul.S(y, z)) select Msubf.S(x, y, z).
      Float32BinopMatcher mright(m.right().node());
      Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64SubS, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  Mips64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    if (kArchVariant == kMips64r2) {
      // For Sub.D(Mul.D(x, y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMips64MsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    if (kArchVariant == kMips64r6) {
      // For Sub.D(x, Mul.D(y, z)) select Msubf.D(x, y, z).
      Float64BinopMatcher mright(m.right().node());
      Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64SubD, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}


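// Float64 modulus has no MIPS instruction; it is lowered to a runtime call
// (hence MarkAsCall), passing the arguments in f12/f14 and returning the
// result in f0, which appears to match the FP calling convention assumed by
// the rest of this file.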
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}

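// IEEE 754 operations without a direct machine instruction (such as pow and
// the transcendental unops) are likewise lowered to C library calls with
// fixed argument and result registers.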
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}

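// For C calls the register-argument slots that the ABI reserves on the stack
// (kCArgSlotCount of them) are skipped before poking stack arguments; for JS
// calls the needed stack area is claimed explicitly instead.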
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(descriptor->StackParameterCount());
    if (push_count > 0) {
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(push_count << kPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
             g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
      }
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

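// Unaligned accesses are selected as the Ul*/Us* macro instructions. Note
// that on r6 this code should be unreachable in practice, since
// AlignmentRequirements() below reports full unaligned access support for
// that variant.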
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  UnalignedLoadRepresentation load_rep =
      UnalignedLoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitUnalignedStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Uswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Usdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMips64Ush;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Usw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Usd;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kSimd128:
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

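  // The length is only used as an immediate when the offset is in a
  // register; presumably the bounds check can encode at most one of the two
  // operands as an immediate.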
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer));
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kSimd128:
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
       g.UseRegister(buffer));
}


namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

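// Heuristically classifies a node as producing an unsigned (zero-extended)
// 32-bit value: loads take their signedness from the load representation,
// and otherwise only a fixed list of inherently unsigned operations is
// matched.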
bool IsNodeUnsigned(Node* n) {
  NodeMatcher m(n);

  if (m.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else if (m.IsUnalignedLoad()) {
    UnalignedLoadRepresentation load_rep =
        UnalignedLoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else {
    return m.IsUint32Div() || m.IsUint32LessThan() ||
           m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
           m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
           m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
  }
}

// Performs a full Word32 compare: both operands are shifted left by 32 so
// that any stale upper bits are discarded and the 64-bit compare observes
// only the 32-bit values.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}

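// Like VisitWordCompare, but when --debug-code is enabled it first emits the
// full 32-bit compare as well and asserts that both strategies agree.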
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // MIPS64 doesn't support Word32 compare instructions. Instead it relies
  // on the values in registers being correctly sign-extended and uses a
  // Word64 comparison instead. This behavior is correct in most cases,
  // but doesn't work when comparing signed with unsigned operands.
  // We could simulate a full Word32 compare in all cases, but this would
  // create unnecessary overhead since unsigned integers are rarely
  // used in JavaScript.
  // The solution proposed here tries to match a comparison of a signed
  // with an unsigned operand, and performs the full Word32 compare only
  // in those cases. Unfortunately, the solution is not complete because
  // it might skip cases where a full Word32 compare is needed, so
  // basically it is a hack.
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
  }
}


void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}



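// Emits a compare of {value} against zero, folded into the given branch,
// deoptimization, or materialized-boolean continuation.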
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionCode opcode = cont->Encode(kMips64Cmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             g.TempImmediate(0), cont->reason(),
                             cont->frame_state());
  } else {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  }
}


// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (selector->CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kMips64Tst, cont, true);
      default:
        break;
    }
  }

  // The continuation could not be combined with a compare; emit a compare
  // against zero.
  EmitWordCompareZero(selector, value, cont);
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
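  // The heuristic below weights time three times as heavily as space and
  // picks a table switch only when its estimated cost does not exceed that
  // of a sequence of conditional jumps.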
  size_t table_space_cost = 10 + 2 * sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 2 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kMips64Sub, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }

  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dadd, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dadd, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dsub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dsub, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64MulOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64MulOvf, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DaddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DaddOvf, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DsubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DsubOvf, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }

  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kMips64Float64SilenceNaN, node);
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

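// Atomic loads and stores reuse the regular addressing-mode selection: an
// index that fits the immediate field is encoded directly, otherwise
// base + index is first materialized into a temporary register.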
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32Ctz |
         MachineOperatorBuilder::kWord64Ctz |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kWord32ReverseBytes |
         MachineOperatorBuilder::kWord64ReverseBytes;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  if (kArchVariant == kMips64r6) {
    return MachineOperatorBuilder::AlignmentRequirements::
        FullUnalignedAccessSupport();
  } else {
    DCHECK(kArchVariant == kMips64r2);
    return MachineOperatorBuilder::AlignmentRequirements::
        NoUnalignedAccessSupport();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8